Dataset columns (string-length ranges):
  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
5897ee7ad64b9d4ef046d353b9d38736b0e1c322.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAdd(&(gradInput[argmax]), z); } } } // 4d tensor B x D x H x W void adaptive_max_pool2d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef output_size) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("adaptive_max_pool2d_cuda", {output_arg, indices_arg, input_arg}); for (int64_t i = 0; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_max_pool2d_cuda(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(output_size.size() == 2, "adaptive_max_pool2d: internal error: 
output_size.size() must be 2"); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_cuda", [&] { output.resize_({sizeD, osizeH, osizeW}); indices.resize_({sizeD, osizeH, osizeW}); scalar_t *input_data = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); }); } ); AT_CUDA_CHECK(hipGetLastError()); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_cuda", [&] { output.resize_({sizeB, sizeD, osizeH, osizeW}); indices.resize_({sizeB, sizeD, osizeH, osizeW}); scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); }); } ); AT_CUDA_CHECK(hipGetLastError()); } } void adaptive_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }; TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("adaptive_max_pool2d_out_cuda", {grad_input_arg, grad_output_arg, input_arg, indices_arg}); bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput.size(1); int64_t osizeW = gradOutput.size(2); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput.resize_as_(input); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } }); } ); AT_CUDA_CHECK(hipGetLastError()); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput.size(2); int64_t osizeW = gradOutput.size(3); gradInput.resize_as_(input); gradInput.zero_(); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptivemaxgradinput) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } }); } ); AT_CUDA_CHECK(hipGetLastError()); } } } // namespace std::tuple<Tensor&, Tensor&> adaptive_max_pool2d_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef output_size) { adaptive_max_pool2d_out_cuda_template( output, indices, input, output_size); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> adaptive_max_pool2d_cuda( const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); adaptive_max_pool2d_out_cuda_template( output, indices, input, output_size); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& adaptive_max_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } Tensor adaptive_max_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } } // at::native } // at
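Editor's note (not part of the dataset files): the adaptivemaxpool kernel above derives each output pixel's input window from its start_index/end_index helpers, so the windows tile the whole input even when the sizes do not divide evenly. A minimal host-side C++ sketch of that mapping, using the same arithmetic as the kernel:

    #include <cmath>
    #include <cstdio>

    // Same window arithmetic as the kernel's start_index/end_index helpers:
    // output index a over b output cells maps into c input cells.
    static int start_index(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); }
    static int end_index(int a, int b, int c)   { return (int)std::ceil((float)((a + 1) * c) / b); }

    int main() {
      const int isizeW = 10;  // input width (illustrative values)
      const int osizeW = 4;   // adaptive output width
      for (int ow = 0; ow < osizeW; ++ow) {
        int istart = start_index(ow, osizeW, isizeW);
        int iend   = end_index(ow, osizeW, isizeW);
        // Each output column max-pools over the half-open input range [istart, iend).
        std::printf("out %d <- in [%d, %d)\n", ow, istart, iend);
      }
      return 0;
    }

For 10 input columns and 4 output columns this prints windows [0,3), [2,5), [5,8), [7,10): adjacent windows may overlap by one element, which is why the kernel recomputes kH/kW per output pixel.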
5897ee7ad64b9d4ef046d353b9d38736b0e1c322.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)std::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAdd(&(gradInput[argmax]), z); } } } // 4d tensor B x D x H x W void adaptive_max_pool2d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef output_size) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input, "input", 3 }; checkAllSameGPU("adaptive_max_pool2d_cuda", {output_arg, indices_arg, input_arg}); for (int64_t i = 0; i < input.ndimension(); i++) { TORCH_CHECK(input.size(i) > 0, "adaptive_max_pool2d_cuda(): expected input to have non-empty spatial dimensions, " "but input has sizes ", input.sizes(), " with dimension ", i, " being " "empty"); } TORCH_CHECK((input.ndimension() == 3 || input.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); TORCH_CHECK(output_size.size() == 2, "adaptive_max_pool2d: internal error: 
output_size.size() must be 2"); int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_cuda", [&] { output.resize_({sizeD, osizeH, osizeW}); indices.resize_({sizeD, osizeH, osizeW}); scalar_t *input_data = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); }); } ); AT_CUDA_CHECK(cudaGetLastError()); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_cuda", [&] { output.resize_({sizeB, sizeD, osizeH, osizeW}); indices.resize_({sizeB, sizeD, osizeH, osizeW}); scalar_t *input_data = input_.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); }); } ); AT_CUDA_CHECK(cudaGetLastError()); } } void adaptive_max_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { TensorArg grad_input_arg{ gradInput, "gradInput", 1 }; TensorArg grad_output_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input, "input", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("adaptive_max_pool2d_out_cuda", {grad_input_arg, grad_output_arg, input_arg, indices_arg}); bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput = gradOutput_.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput.size(1); int64_t osizeW = gradOutput.size(2); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput.resize_as_(input); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } }); } ); AT_CUDA_CHECK(cudaGetLastError()); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput.size(2); int64_t osizeW = gradOutput.size(3); gradInput.resize_as_(input); gradInput.zero_(); //bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "adaptive_max_pool2d_backward_cuda", [&] { scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeB*sizeD, blocksH); dim3 threads(32, 8); if(atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } else { // run updateGradInput kernel, accumulate gradients atomically adaptivemaxgradinput <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>> ( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); } }); } ); AT_CUDA_CHECK(cudaGetLastError()); } } } // namespace std::tuple<Tensor&, Tensor&> adaptive_max_pool2d_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef output_size) { adaptive_max_pool2d_out_cuda_template( output, indices, input, output_size); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> adaptive_max_pool2d_cuda( const Tensor& input, IntArrayRef output_size) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); adaptive_max_pool2d_out_cuda_template( output, indices, input, output_size); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& adaptive_max_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } Tensor adaptive_max_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, const Tensor& indices) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); adaptive_max_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, indices); return gradInput; } } // at::native } // at
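Editor's note (not part of the dataset files): comparing this .cu file with its .hip counterpart above, the substantive changes hipify makes are the header paths (THC -> THH), the runtime calls (cudaGetLastError -> hipGetLastError, at::cuda::getCurrentCUDAStream -> at::hip::getCurrentHIPStreamMasqueradingAsCUDA), and the kernel-launch syntax. A minimal runnable CUDA sketch with a hypothetical kernel, showing how the launch is rewritten:

    #include <cuda_runtime.h>

    // Hypothetical element-wise kernel, used only to illustrate the launch rewrite.
    __global__ void scale(float* x, float a, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) x[i] *= a;
    }

    int main() {
      const int n = 1024;
      float* d_x = nullptr;
      cudaMalloc(&d_x, n * sizeof(float));

      dim3 blocks((n + 255) / 256), threads(256);

      // CUDA form, as written in the .cu file above:
      scale<<<blocks, threads, 0, 0>>>(d_x, 2.0f, n);

      // hipify rewrites the same launch in the .hip file to:
      //   hipLaunchKernelGGL((scale), dim3(blocks), dim3(threads), 0, 0, d_x, 2.0f, n);
      // and swaps the runtime calls, e.g. cudaGetLastError() -> hipGetLastError().

      cudaDeviceSynchronize();
      cudaFree(d_x);
      return 0;
    }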
657ffd631c8e22b32b58c834301b6e34bd4c7255.hip
// !!! This is a file automatically generated by hipify!!! /** * Yuri Gorokhov * lab 5 - Modulus power of two */ #include <stdio.h> #include <hip/hip_runtime.h> #include <math.h> #define ITERATIONS 100000 #define THREADS 32 #define POW 30 __global__ void kernel_mod(int); int main (void) { hipEvent_t start, stop; int input[POW]; float output[POW]; hipEventCreate(&start); hipEventCreate(&stop); for(int i = 0; i < POW; i++) { input[i] = pow(2,i); hipEventRecord(start,0); hipLaunchKernelGGL(( kernel_mod), dim3(1),dim3(THREADS), 0, 0, pow(2,i)); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&output[i], start, stop); } printf("["); for(int i = 0; i < POW; i++) { printf("%i, ", input[i]); } printf("\n["); for(int i = 0; i < POW; i++) { printf("%f, ", output[i]); } return 0; } __global__ void kernel_mod(int mod) { __shared__ float A[THREADS]; int temp; int target = threadIdx.x % mod; for(int i = 1; i <= ITERATIONS; i++) { temp = A[target]; } __syncthreads(); }
657ffd631c8e22b32b58c834301b6e34bd4c7255.cu
/** * Yuri Gorokhov * lab 5 - Modulus power of two */ #include <stdio.h> #include <cuda.h> #include <math.h> #define ITERATIONS 100000 #define THREADS 32 #define POW 30 __global__ void kernel_mod(int); int main (void) { cudaEvent_t start, stop; int input[POW]; float output[POW]; cudaEventCreate(&start); cudaEventCreate(&stop); for(int i = 0; i < POW; i++) { input[i] = pow(2,i); cudaEventRecord(start,0); kernel_mod<<<1,THREADS>>>(pow(2,i)); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&output[i], start, stop); } printf("["); for(int i = 0; i < POW; i++) { printf("%i, ", input[i]); } printf("\n["); for(int i = 0; i < POW; i++) { printf("%f, ", output[i]); } return 0; } __global__ void kernel_mod(int mod) { __shared__ float A[THREADS]; int temp; int target = threadIdx.x % mod; for(int i = 1; i <= ITERATIONS; i++) { temp = A[target]; } __syncthreads(); }
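Editor's note (not part of the dataset files): the benchmark above times each launch with CUDA events: record a start event, launch the work, record a stop event, synchronize on the stop event, then read the elapsed milliseconds. A stripped-down, runnable sketch of the same pattern (hypothetical no-op kernel, error checking omitted for brevity):

    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void noop() {}

    int main() {
      cudaEvent_t start, stop;
      cudaEventCreate(&start);
      cudaEventCreate(&stop);

      cudaEventRecord(start, 0);   // enqueue "start" marker on stream 0
      noop<<<1, 32>>>();           // work being timed
      cudaEventRecord(stop, 0);    // enqueue "stop" marker
      cudaEventSynchronize(stop);  // wait until the stop marker has executed

      float ms = 0.0f;
      cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed time in milliseconds
      std::printf("kernel took %f ms\n", ms);

      cudaEventDestroy(start);
      cudaEventDestroy(stop);
      return 0;
    }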
882b3c7a8d9146d630160969f2d71b29b4f105fc.hip
// !!! This is a file automatically generated by hipify!!! /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream); } #endif /* CUDA_DISABLER */
882b3c7a8d9146d630160969f2d71b29b4f105fc.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ #if !defined CUDA_DISABLER #include "row_filter.h" namespace filter { template void linearRow<short3, float3>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream); } #endif /* CUDA_DISABLER */
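Editor's note (not part of the dataset files): this pair differs only in the stream type of one explicit template instantiation (hipStream_t vs cudaStream_t); OpenCV's CUDA filters split each type combination of linearRow into its own translation unit this way to keep per-file compile times bounded. A small, self-contained illustration of the explicit-instantiation pattern itself (names hypothetical, plain host C++ for brevity):

    #include <cstdio>

    // Generic "row filter" stand-in for the device-side template.
    template <typename SrcT, typename DstT>
    void linearRow(const SrcT* src, DstT* dst, int n) {
      for (int i = 0; i < n; ++i) dst[i] = static_cast<DstT>(src[i]);
    }

    // Explicit instantiation: forces this specialization to be emitted in this
    // translation unit, so other files can declare it and link against it.
    template void linearRow<short, float>(const short*, float*, int);

    int main() {
      short in[4] = {1, 2, 3, 4};
      float out[4];
      linearRow(in, out, 4);
      for (float v : out) std::printf("%g ", v);
      std::printf("\n");
      return 0;
    }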
952949a2ee42516c65fe94ec24b679703d53df6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from magma_zmcsrcompressor_gpu.cu normal z -> c, Sun May 3 11:22:58 2015 @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_cmcsrgpu_kernel1( int num_rows, magmaFloatComplex *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, magmaFloatComplex *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex zero = MAGMA_C_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_cmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ){ int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_cmcsrgpu_kernel3( int num_rows, magmaFloatComplex *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, magmaFloatComplex *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; magmaFloatComplex zero = MAGMA_C_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param A magma_c_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_caux ********************************************************************/ extern "C" magma_int_t magma_cmcsrcompressor_gpu( magma_c_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_c_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_c_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are hipLaunchKernelGGL(( magma_cmcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue , A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); hipLaunchKernelGGL(( magma_cmcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue , A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_cmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back hipLaunchKernelGGL(( magma_cmcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue , A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_cmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_cmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_cmcsrcompressor_gpu( &dA, queue )); magma_cmfree( &dA, queue ); magma_cmfree( A, queue ); CHECK( magma_cmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_cmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_cmfree( &dA, queue ); magma_cmfree( &CSRA, queue ); } cleanup: magma_cmfree( &dA, queue ); magma_cmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
952949a2ee42516c65fe94ec24b679703d53df6e.cu
/* -- MAGMA (version 1.6.2) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date May 2015 @generated from magma_zmcsrcompressor_gpu.cu normal z -> c, Sun May 3 11:22:58 2015 @author Hartwig Anzt */ #include "common_magmasparse.h" #define BLOCK_SIZE1 256 #define BLOCK_SIZE2 1 // copy nonzeros into new structure __global__ void magma_cmcsrgpu_kernel1( int num_rows, magmaFloatComplex *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind, magmaFloatComplex *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ magmaFloatComplex zero = MAGMA_C_ZERO; int start = A_rowptr[ row ]; int new_location = start; int end = A_rowptr[ row+1 ]; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ // B_val[new_location] = A_val[j]; // B_colind[new_location] = A_colind[j]; new_location++; } } // this is not a correctr rowpointer! this is nn_z in this row! B_rowptr[ row ] = new_location-start; } } // generate a valid rowpointer __global__ void magma_cmcsrgpu_kernel2( int num_rows, magma_index_t *B_rowptr, magma_index_t *A_rowptr ){ int idx = blockIdx.x*blockDim.x+threadIdx.x; int j, nnz = 0; if( idx == 0 ){ A_rowptr[ 0 ] = nnz; for( j=0; j<num_rows; j++ ){ nnz+=B_rowptr[ j ]; A_rowptr[ j+1 ] = nnz; } } } // copy new structure into original matrix __global__ void magma_cmcsrgpu_kernel3( int num_rows, magmaFloatComplex *B_val, magma_index_t *B_rowptr, magma_index_t *B_colind, magma_index_t *B2_rowptr, magmaFloatComplex *A_val, magma_index_t *A_rowptr, magma_index_t *A_colind ){ int row = blockIdx.x*blockDim.x+threadIdx.x; int j, new_location; if(row<num_rows){ new_location = A_rowptr[ row ]; int start = B2_rowptr[ row ]; int end = B2_rowptr[ row+1 ]; magmaFloatComplex zero = MAGMA_C_ZERO; for( j=start; j<end; j++ ){ if( A_val[j] != zero ){ B_val[new_location] = A_val[j]; B_colind[new_location] = A_colind[j]; new_location++; } // A_val[ j ] = B_val[ j ]; // A_colind[ j ] = B_colind[ j ]; } } } /** Purpose ------- Removes zeros in a CSR matrix. This is a GPU implementation of the CSR compressor. Arguments --------- @param A magma_c_matrix* input/output matrix @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_caux ********************************************************************/ extern "C" magma_int_t magma_cmcsrcompressor_gpu( magma_c_matrix *A, magma_queue_t queue ) { magma_int_t info = 0; magma_c_matrix B={Magma_CSR}, B2={Magma_CSR}; magma_c_matrix dA={Magma_CSR}, CSRA={Magma_CSR}; magma_index_t *cputmp = NULL; if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) { CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 )); CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 )); magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 ); dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) ); // copying the nonzeros into B and write in B.drow how many there are magma_cmcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue >>> ( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol ); // correct the row pointer dim3 grid2( 1, 1, 1); magma_cmcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue >>> ( A->num_rows, B.drow, A->drow ); // access the true number of nonzeros CHECK( magma_index_malloc_cpu( &cputmp, 1 )); magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 ); A->nnz = (magma_int_t) cputmp[0]; // reallocate with right size CHECK( magma_cmalloc( &B.dval, A->nnz )); CHECK( magma_index_malloc( &B.dcol, A->nnz )); // copy correct values back magma_cmcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue >>> ( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol ); magma_free( A->dcol ); magma_free( A->dval ); A->dcol = B.dcol; A->dval = B.dval; } else { magma_storage_t A_storage = A->storage_type; magma_location_t A_location = A->memory_location; CHECK( magma_cmconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue )); CHECK( magma_cmtransfer( *A, &dA, A->memory_location, Magma_DEV, queue )); CHECK( magma_cmcsrcompressor_gpu( &dA, queue )); magma_cmfree( &dA, queue ); magma_cmfree( A, queue ); CHECK( magma_cmtransfer( dA, &CSRA, Magma_DEV, A_location, queue )); CHECK( magma_cmconvert( CSRA, A, Magma_CSR, A_storage, queue )); magma_cmfree( &dA, queue ); magma_cmfree( &CSRA, queue ); } cleanup: magma_cmfree( &dA, queue ); magma_cmfree( &CSRA, queue ); magma_free( B2.drow ); magma_free( B.drow ); return info; }
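Editor's note (not part of the dataset files): the compressor above works in three passes: kernel1 counts the surviving nonzeros per row, kernel2 turns those counts into a valid row-pointer array via a (sequential) exclusive scan, and kernel3 scatters the nonzero values and column indices into the compacted arrays. A host-side C++ sketch of the same three phases on a toy CSR matrix (illustrative only; the MAGMA version runs these phases on the GPU with magmaFloatComplex values):

    #include <cstdio>
    #include <vector>

    int main() {
      // Toy CSR matrix with explicit zeros: row 0 = [1 0 2], row 1 = [0 0 0], row 2 = [0 3 0].
      std::vector<int>   rowptr = {0, 3, 6, 9};
      std::vector<int>   colind = {0, 1, 2, 0, 1, 2, 0, 1, 2};
      std::vector<float> val    = {1, 0, 2, 0, 0, 0, 0, 3, 0};
      const int num_rows = 3;

      // Phase 1 (kernel1): count nonzeros per row.
      std::vector<int> counts(num_rows, 0);
      for (int r = 0; r < num_rows; ++r)
        for (int j = rowptr[r]; j < rowptr[r + 1]; ++j)
          if (val[j] != 0.0f) ++counts[r];

      // Phase 2 (kernel2): exclusive scan of the counts gives the new row pointers.
      std::vector<int> new_rowptr(num_rows + 1, 0);
      for (int r = 0; r < num_rows; ++r) new_rowptr[r + 1] = new_rowptr[r] + counts[r];
      const int nnz = new_rowptr[num_rows];

      // Phase 3 (kernel3): scatter surviving entries into the compacted arrays.
      std::vector<int>   new_colind(nnz);
      std::vector<float> new_val(nnz);
      for (int r = 0; r < num_rows; ++r) {
        int dst = new_rowptr[r];
        for (int j = rowptr[r]; j < rowptr[r + 1]; ++j)
          if (val[j] != 0.0f) { new_val[dst] = val[j]; new_colind[dst] = colind[j]; ++dst; }
      }

      std::printf("compressed nnz = %d\n", nnz);  // expected: 3 (values 1, 2, 3)
      return 0;
    }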
9f2e27e9e14ea69b5bd6855d0c606ef48646597d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ #include <algorithm> #include <iomanip> #include <limits> #include "bfs.cuh" #include "graph.hpp" #include <utilities/error_utils.h> #include "bfs_kernels_hip.cuh" #include "traversal_common.cuh" #include "utilities/graph_utils.cuh" namespace cugraph { namespace detail { enum BFS_ALGO_STATE { TOPDOWN, BOTTOMUP }; template <typename IndexType> void BFS<IndexType>::setup() { // Determinism flag, false by default deterministic = false; // Working data // Each vertex can be in the frontier at most once // We will update frontier during the execution // We need the orig to reset frontier, or ALLOC_FREE_TRY original_frontier.resize(number_of_vertices); frontier = original_frontier.data().get(); // size of bitmaps for vertices vertices_bmap_size = (number_of_vertices / (8 * sizeof(int)) + 1); // ith bit of visited_bmap is set <=> ith vertex is visited visited_bmap.resize(vertices_bmap_size); // ith bit of isolated_bmap is set <=> degree of ith vertex = 0 isolated_bmap.resize(vertices_bmap_size); // vertices_degree[i] = degree of vertex i vertex_degree.resize(number_of_vertices); // We will need (n+1) ints buffer for two differents things (bottom up or top down) - sharing it // since those uses are mutually exclusive buffer_np1_1.resize(number_of_vertices + 1); buffer_np1_2.resize(number_of_vertices + 1); // Using buffers : top down // frontier_vertex_degree[i] is the degree of vertex frontier[i] frontier_vertex_degree = buffer_np1_1.data().get(); // exclusive sum of frontier_vertex_degree exclusive_sum_frontier_vertex_degree = buffer_np1_2.data().get(); // Using buffers : bottom up // contains list of unvisited vertices unvisited_queue = buffer_np1_1.data().get(); // size of the "last" unvisited queue : size_last_unvisited_queue // refers to the size of unvisited_queue // which may not be up to date (the queue may contains vertices that are now // visited) // We may leave vertices unvisited after bottom up main kernels - storing them // here left_unvisited_queue = buffer_np1_2.data().get(); // We use buckets of edges (32 edges per bucket for now, see exact macro in bfs_kernels). 
// frontier_vertex_degree_buckets_offsets[i] is the index k such as frontier[k] is the source of // the first edge of the bucket See top down kernels for more details exclusive_sum_frontier_vertex_buckets_offsets.resize( ((number_of_edges / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2)); // Init device-side counters // Those counters must be/can be reset at each bfs iteration // Keeping them adjacent in memory allow use call only one hipMemset - launch latency is the // current bottleneck d_counters_pad.resize(4); d_new_frontier_cnt = d_counters_pad.data().get(); d_mu = d_counters_pad.data().get() + 1; d_unvisited_cnt = d_counters_pad.data().get() + 2; d_left_unvisited_cnt = d_counters_pad.data().get() + 3; // Lets use this int* for the next 3 lines // Its dereferenced value is not initialized - so we dont care about what we // put in it IndexType *d_nisolated = d_new_frontier_cnt; hipMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream); // Computing isolated_bmap // Only dependent on graph - not source vertex - done once traversal::flag_isolated_vertices(number_of_vertices, isolated_bmap.data().get(), row_offsets, vertex_degree.data().get(), d_nisolated, stream); hipMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We need nisolated to be ready to use hipStreamSynchronize(stream); } template <typename IndexType> void BFS<IndexType>::configure(IndexType *_distances, IndexType *_predecessors, double *_sp_counters, int *_edge_mask) { distances = _distances; predecessors = _predecessors; edge_mask = _edge_mask; sp_counters = _sp_counters; useEdgeMask = (edge_mask != NULL); computeDistances = (distances != NULL); computePredecessors = (predecessors != NULL); // We need distances to use bottom up if (directed && !computeDistances) { distances_vals.resize(number_of_vertices); distances = distances_vals.data().get(); } // In case the shortest path counters is required, previous_bmap has to be allocated if (sp_counters) { previous_visited_bmap.resize(vertices_bmap_size); } } template <typename IndexType> void BFS<IndexType>::traverse(IndexType source_vertex) { // Init visited_bmap // If the graph is undirected, we not that // we will never discover isolated vertices (in degree = out degree = 0) // we avoid a lot of work by flagging them now // in g500 graphs they represent ~25% of total vertices // more than that for wiki and twitter graphs if (directed) { hipMemsetAsync(visited_bmap.data().get(), 0, vertices_bmap_size * sizeof(int), stream); } else { hipMemcpyAsync(visited_bmap.data().get(), isolated_bmap.data().get(), vertices_bmap_size * sizeof(int), hipMemcpyDeviceToDevice, stream); } // If needed, setting all vertices as undiscovered (inf distance) // We dont use computeDistances here // if the graph is undirected, we may need distances even if // computeDistances is false if (distances) traversal::fill_vec(distances, number_of_vertices, traversal::vec_t<IndexType>::max, stream); // If needed, setting all predecessors to non-existent (-1) if (computePredecessors) { hipMemsetAsync(predecessors, -1, number_of_vertices * sizeof(IndexType), stream); } if (sp_counters) { hipMemsetAsync(sp_counters, 0, number_of_vertices * sizeof(double), stream); double value = 1; hipMemcpyAsync(sp_counters + source_vertex, &value, sizeof(double), hipMemcpyHostToDevice); } // // Initial frontier // frontier = original_frontier.data().get(); if (distances) { hipMemsetAsync(&distances[source_vertex], 0, sizeof(IndexType), stream); } // Setting source_vertex as visited 
// There may be bit already set on that bmap (isolated vertices) - if the // graph is undirected int current_visited_bmap_source_vert = 0; if (!directed) { hipMemcpyAsync(&current_visited_bmap_source_vert, visited_bmap.data().get() + (source_vertex / INT_SIZE), sizeof(int), hipMemcpyDeviceToHost); // We need current_visited_bmap_source_vert hipStreamSynchronize(stream); } int m = (1 << (source_vertex % INT_SIZE)); // In that case, source is isolated, done now if (!directed && (m & current_visited_bmap_source_vert)) { // Init distances and predecessors are done, (cf Streamsync in previous if) return; } m |= current_visited_bmap_source_vert; hipMemcpyAsync(visited_bmap.data().get() + (source_vertex / INT_SIZE), &m, sizeof(int), hipMemcpyHostToDevice, stream); // Adding source_vertex to init frontier hipMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), hipMemcpyHostToDevice, stream); // mf : edges in frontier // nf : vertices in frontier // mu : edges undiscovered // nu : nodes undiscovered // lvl : current frontier's depth IndexType mf, nf, mu, nu; bool growing; IndexType lvl = 1; // Frontier has one vertex nf = 1; // all edges are undiscovered (by def isolated vertices have 0 edges) mu = number_of_edges; // all non isolated vertices are undiscovered (excepted source vertex, which is in frontier) // That number is wrong if source_vertex is also isolated - but it's not important nu = number_of_vertices - nisolated - nf; // Last frontier was 0, now it is 1 growing = true; IndexType size_last_left_unvisited_queue = number_of_vertices; // we just need value > 0 IndexType size_last_unvisited_queue = 0; // queue empty // Typical pre-top down workflow. set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); hipMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We need mf hipStreamSynchronize(stream); // At first we know we have to use top down BFS_ALGO_STATE algo_state = TOPDOWN; // useDistances : we check if a vertex is a parent using distances in bottom up - distances become // working data undirected g : need parents to be in children's neighbors // In case the shortest path counters need to be computeed, the bottom_up approach cannot be used bool can_use_bottom_up = (!sp_counters && !directed && distances); while (nf > 0) { // Each vertices can appear only once in the frontierer array - we know it will fit new_frontier = frontier + nf; IndexType old_nf = nf; resetDevicePointers(); if (can_use_bottom_up) { // Choosing algo // Finite machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf switch (algo_state) { case TOPDOWN: if (mf > mu / alpha) algo_state = BOTTOMUP; break; case BOTTOMUP: if (!growing && nf < number_of_vertices / beta) { // We need to prepare the switch back to top down // We couldnt keep track of mu during bottom up - because we dont know what mf is. // Computing mu here bfs_kernels::count_unvisited_edges(unvisited_queue, size_last_unvisited_queue, visited_bmap.data().get(), vertex_degree.data().get(), d_mu, stream); // Typical pre-top down workflow. 
set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); hipMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), hipMemcpyDeviceToHost, stream); hipMemcpyAsync(&mu, d_mu, sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We will need mf and mu hipStreamSynchronize(stream); algo_state = TOPDOWN; } break; } } // Executing algo switch (algo_state) { case TOPDOWN: // This step is only required if sp_counters is not nullptr if (sp_counters) { hipMemcpyAsync(previous_visited_bmap.data().get(), visited_bmap.data().get(), vertices_bmap_size * sizeof(int), hipMemcpyDeviceToDevice, stream); // We need to copy the visited_bmap before doing the traversal hipStreamSynchronize(stream); } traversal::compute_bucket_offsets( exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets.data().get(), nf, mf, stream); bfs_kernels::frontier_expand(row_offsets, col_indices, frontier, nf, mf, lvl, new_frontier, d_new_frontier_cnt, exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets.data().get(), previous_visited_bmap.data().get(), visited_bmap.data().get(), distances, predecessors, sp_counters, edge_mask, isolated_bmap.data().get(), directed, stream, deterministic); mu -= mf; hipMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream); CUDA_CHECK_LAST(); // We need nf hipStreamSynchronize(stream); if (nf) { // Typical pre-top down workflow. set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, new_frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); hipMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We need mf hipStreamSynchronize(stream); } break; case BOTTOMUP: bfs_kernels::fill_unvisited_queue(visited_bmap.data().get(), vertices_bmap_size, number_of_vertices, unvisited_queue, d_unvisited_cnt, stream, deterministic); size_last_unvisited_queue = nu; bfs_kernels::bottom_up_main(unvisited_queue, size_last_unvisited_queue, left_unvisited_queue, d_left_unvisited_cnt, visited_bmap.data().get(), row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); // The number of vertices left unvisited decreases // If it wasnt necessary last time, it wont be this time if (size_last_left_unvisited_queue) { hipMemcpyAsync(&size_last_left_unvisited_queue, d_left_unvisited_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream); CUDA_CHECK_LAST() // We need last_left_unvisited_size hipStreamSynchronize(stream); bfs_kernels::bottom_up_large(left_unvisited_queue, size_last_left_unvisited_queue, visited_bmap.data().get(), row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); } hipMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream); CUDA_CHECK_LAST() // We will need nf hipStreamSynchronize(stream); break; } // Updating undiscovered edges count nu -= nf; // Using new frontier frontier = new_frontier; growing = (nf > old_nf); ++lvl; } } template <typename IndexType> void BFS<IndexType>::resetDevicePointers() { hipMemsetAsync(d_counters_pad.data().get(), 0, 4 * 
sizeof(IndexType), stream); } template <typename IndexType> void BFS<IndexType>::clean() { // the vectors have a destructor that takes care of cleaning } template class BFS<int>; } // namespace detail // NOTE: SP counter increase extremely fast on large graph // It can easily reach 1e40~1e70 on GAP-road.mtx template <typename VT, typename ET, typename WT> void bfs(experimental::GraphCSRView<VT, ET, WT> const &graph, VT *distances, VT *predecessors, double *sp_counters, const VT start_vertex, bool directed) { CUGRAPH_EXPECTS(typeid(VT) == typeid(int), "Unsupported vertex id data type, please use int"); CUGRAPH_EXPECTS(typeid(ET) == typeid(int), "Unsupported edge id data type, please use int"); CUGRAPH_EXPECTS((typeid(WT) == typeid(float)) || (typeid(WT) == typeid(double)), "Unsupported weight data type, please use float or double"); VT number_of_vertices = graph.number_of_vertices; ET number_of_edges = graph.number_of_edges; const VT *indices_ptr = graph.indices; const ET *offsets_ptr = graph.offsets; int alpha = 15; int beta = 18; // FIXME: Use VT and ET in the BFS detail cugraph::detail::BFS<VT> bfs( number_of_vertices, number_of_edges, offsets_ptr, indices_ptr, directed, alpha, beta); bfs.configure(distances, predecessors, sp_counters, nullptr); bfs.traverse(start_vertex); } template void bfs<int, int, float>(experimental::GraphCSRView<int, int, float> const &graph, int *distances, int *predecessors, double *sp_counters, const int source_vertex, bool directed); template void bfs<int, int, double>(experimental::GraphCSRView<int, int, double> const &graph, int *distances, int *predecessors, double *sp_counters, const int source_vertex, bool directed); } // namespace cugraph
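Editor's note (not part of the dataset files): the traverse() routine above implements direction-optimizing BFS (the state machine described in the paper linked in its comments): it switches from top-down to bottom-up once the frontier's outgoing edge count mf exceeds a fraction of the still-undiscovered edges mu, and switches back once the frontier has stopped growing and is small relative to the vertex count. A compact, runnable sketch of just that switching rule, with variable names taken from the code (alpha = 15 and beta = 18 are the defaults set in bfs()):

    #include <cstdio>

    enum State { TOPDOWN, BOTTOMUP };

    // mf: edges out of the current frontier, mu: edges still undiscovered,
    // nf: frontier size, n: total vertices, growing: frontier grew last level.
    State next_state(State s, long mf, long mu, long nf, long n, bool growing,
                     int alpha = 15, int beta = 18) {
      if (s == TOPDOWN && mf > mu / alpha) return BOTTOMUP;            // frontier touches many edges
      if (s == BOTTOMUP && !growing && nf < n / beta) return TOPDOWN;  // frontier small and shrinking
      return s;
    }

    int main() {
      State s = TOPDOWN;
      s = next_state(s, /*mf=*/5000, /*mu=*/60000, /*nf=*/100, /*n=*/10000, /*growing=*/true);
      std::printf("state = %s\n", s == BOTTOMUP ? "BOTTOMUP" : "TOPDOWN");  // 5000 > 60000/15 -> BOTTOMUP
      return 0;
    }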
9f2e27e9e14ea69b5bd6855d0c606ef48646597d.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation * and any modifications thereto. Any use, reproduction, disclosure or * distribution of this software and related documentation without an express * license agreement from NVIDIA CORPORATION is strictly prohibited. * */ #include <algorithm> #include <iomanip> #include <limits> #include "bfs.cuh" #include "graph.hpp" #include <utilities/error_utils.h> #include "bfs_kernels.cuh" #include "traversal_common.cuh" #include "utilities/graph_utils.cuh" namespace cugraph { namespace detail { enum BFS_ALGO_STATE { TOPDOWN, BOTTOMUP }; template <typename IndexType> void BFS<IndexType>::setup() { // Determinism flag, false by default deterministic = false; // Working data // Each vertex can be in the frontier at most once // We will update frontier during the execution // We need the orig to reset frontier, or ALLOC_FREE_TRY original_frontier.resize(number_of_vertices); frontier = original_frontier.data().get(); // size of bitmaps for vertices vertices_bmap_size = (number_of_vertices / (8 * sizeof(int)) + 1); // ith bit of visited_bmap is set <=> ith vertex is visited visited_bmap.resize(vertices_bmap_size); // ith bit of isolated_bmap is set <=> degree of ith vertex = 0 isolated_bmap.resize(vertices_bmap_size); // vertices_degree[i] = degree of vertex i vertex_degree.resize(number_of_vertices); // We will need (n+1) ints buffer for two differents things (bottom up or top down) - sharing it // since those uses are mutually exclusive buffer_np1_1.resize(number_of_vertices + 1); buffer_np1_2.resize(number_of_vertices + 1); // Using buffers : top down // frontier_vertex_degree[i] is the degree of vertex frontier[i] frontier_vertex_degree = buffer_np1_1.data().get(); // exclusive sum of frontier_vertex_degree exclusive_sum_frontier_vertex_degree = buffer_np1_2.data().get(); // Using buffers : bottom up // contains list of unvisited vertices unvisited_queue = buffer_np1_1.data().get(); // size of the "last" unvisited queue : size_last_unvisited_queue // refers to the size of unvisited_queue // which may not be up to date (the queue may contains vertices that are now // visited) // We may leave vertices unvisited after bottom up main kernels - storing them // here left_unvisited_queue = buffer_np1_2.data().get(); // We use buckets of edges (32 edges per bucket for now, see exact macro in bfs_kernels). 
// frontier_vertex_degree_buckets_offsets[i] is the index k such as frontier[k] is the source of // the first edge of the bucket See top down kernels for more details exclusive_sum_frontier_vertex_buckets_offsets.resize( ((number_of_edges / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2)); // Init device-side counters // Those counters must be/can be reset at each bfs iteration // Keeping them adjacent in memory allow use call only one cudaMemset - launch latency is the // current bottleneck d_counters_pad.resize(4); d_new_frontier_cnt = d_counters_pad.data().get(); d_mu = d_counters_pad.data().get() + 1; d_unvisited_cnt = d_counters_pad.data().get() + 2; d_left_unvisited_cnt = d_counters_pad.data().get() + 3; // Lets use this int* for the next 3 lines // Its dereferenced value is not initialized - so we dont care about what we // put in it IndexType *d_nisolated = d_new_frontier_cnt; cudaMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream); // Computing isolated_bmap // Only dependent on graph - not source vertex - done once traversal::flag_isolated_vertices(number_of_vertices, isolated_bmap.data().get(), row_offsets, vertex_degree.data().get(), d_nisolated, stream); cudaMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We need nisolated to be ready to use cudaStreamSynchronize(stream); } template <typename IndexType> void BFS<IndexType>::configure(IndexType *_distances, IndexType *_predecessors, double *_sp_counters, int *_edge_mask) { distances = _distances; predecessors = _predecessors; edge_mask = _edge_mask; sp_counters = _sp_counters; useEdgeMask = (edge_mask != NULL); computeDistances = (distances != NULL); computePredecessors = (predecessors != NULL); // We need distances to use bottom up if (directed && !computeDistances) { distances_vals.resize(number_of_vertices); distances = distances_vals.data().get(); } // In case the shortest path counters is required, previous_bmap has to be allocated if (sp_counters) { previous_visited_bmap.resize(vertices_bmap_size); } } template <typename IndexType> void BFS<IndexType>::traverse(IndexType source_vertex) { // Init visited_bmap // If the graph is undirected, we not that // we will never discover isolated vertices (in degree = out degree = 0) // we avoid a lot of work by flagging them now // in g500 graphs they represent ~25% of total vertices // more than that for wiki and twitter graphs if (directed) { cudaMemsetAsync(visited_bmap.data().get(), 0, vertices_bmap_size * sizeof(int), stream); } else { cudaMemcpyAsync(visited_bmap.data().get(), isolated_bmap.data().get(), vertices_bmap_size * sizeof(int), cudaMemcpyDeviceToDevice, stream); } // If needed, setting all vertices as undiscovered (inf distance) // We dont use computeDistances here // if the graph is undirected, we may need distances even if // computeDistances is false if (distances) traversal::fill_vec(distances, number_of_vertices, traversal::vec_t<IndexType>::max, stream); // If needed, setting all predecessors to non-existent (-1) if (computePredecessors) { cudaMemsetAsync(predecessors, -1, number_of_vertices * sizeof(IndexType), stream); } if (sp_counters) { cudaMemsetAsync(sp_counters, 0, number_of_vertices * sizeof(double), stream); double value = 1; cudaMemcpyAsync(sp_counters + source_vertex, &value, sizeof(double), cudaMemcpyHostToDevice); } // // Initial frontier // frontier = original_frontier.data().get(); if (distances) { cudaMemsetAsync(&distances[source_vertex], 0, sizeof(IndexType), stream); } // Setting 
source_vertex as visited // There may be bit already set on that bmap (isolated vertices) - if the // graph is undirected int current_visited_bmap_source_vert = 0; if (!directed) { cudaMemcpyAsync(&current_visited_bmap_source_vert, visited_bmap.data().get() + (source_vertex / INT_SIZE), sizeof(int), cudaMemcpyDeviceToHost); // We need current_visited_bmap_source_vert cudaStreamSynchronize(stream); } int m = (1 << (source_vertex % INT_SIZE)); // In that case, source is isolated, done now if (!directed && (m & current_visited_bmap_source_vert)) { // Init distances and predecessors are done, (cf Streamsync in previous if) return; } m |= current_visited_bmap_source_vert; cudaMemcpyAsync(visited_bmap.data().get() + (source_vertex / INT_SIZE), &m, sizeof(int), cudaMemcpyHostToDevice, stream); // Adding source_vertex to init frontier cudaMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), cudaMemcpyHostToDevice, stream); // mf : edges in frontier // nf : vertices in frontier // mu : edges undiscovered // nu : nodes undiscovered // lvl : current frontier's depth IndexType mf, nf, mu, nu; bool growing; IndexType lvl = 1; // Frontier has one vertex nf = 1; // all edges are undiscovered (by def isolated vertices have 0 edges) mu = number_of_edges; // all non isolated vertices are undiscovered (excepted source vertex, which is in frontier) // That number is wrong if source_vertex is also isolated - but it's not important nu = number_of_vertices - nisolated - nf; // Last frontier was 0, now it is 1 growing = true; IndexType size_last_left_unvisited_queue = number_of_vertices; // we just need value > 0 IndexType size_last_unvisited_queue = 0; // queue empty // Typical pre-top down workflow. set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We need mf cudaStreamSynchronize(stream); // At first we know we have to use top down BFS_ALGO_STATE algo_state = TOPDOWN; // useDistances : we check if a vertex is a parent using distances in bottom up - distances become // working data undirected g : need parents to be in children's neighbors // In case the shortest path counters need to be computeed, the bottom_up approach cannot be used bool can_use_bottom_up = (!sp_counters && !directed && distances); while (nf > 0) { // Each vertices can appear only once in the frontierer array - we know it will fit new_frontier = frontier + nf; IndexType old_nf = nf; resetDevicePointers(); if (can_use_bottom_up) { // Choosing algo // Finite machine described in http://parlab.eecs.berkeley.edu/sites/all/parlab/files/main.pdf switch (algo_state) { case TOPDOWN: if (mf > mu / alpha) algo_state = BOTTOMUP; break; case BOTTOMUP: if (!growing && nf < number_of_vertices / beta) { // We need to prepare the switch back to top down // We couldnt keep track of mu during bottom up - because we dont know what mf is. // Computing mu here bfs_kernels::count_unvisited_edges(unvisited_queue, size_last_unvisited_queue, visited_bmap.data().get(), vertex_degree.data().get(), d_mu, stream); // Typical pre-top down workflow. 
set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); cudaMemcpyAsync(&mu, d_mu, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We will need mf and mu cudaStreamSynchronize(stream); algo_state = TOPDOWN; } break; } } // Executing algo switch (algo_state) { case TOPDOWN: // This step is only required if sp_counters is not nullptr if (sp_counters) { cudaMemcpyAsync(previous_visited_bmap.data().get(), visited_bmap.data().get(), vertices_bmap_size * sizeof(int), cudaMemcpyDeviceToDevice, stream); // We need to copy the visited_bmap before doing the traversal cudaStreamSynchronize(stream); } traversal::compute_bucket_offsets( exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets.data().get(), nf, mf, stream); bfs_kernels::frontier_expand(row_offsets, col_indices, frontier, nf, mf, lvl, new_frontier, d_new_frontier_cnt, exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets.data().get(), previous_visited_bmap.data().get(), visited_bmap.data().get(), distances, predecessors, sp_counters, edge_mask, isolated_bmap.data().get(), directed, stream, deterministic); mu -= mf; cudaMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); CUDA_CHECK_LAST(); // We need nf cudaStreamSynchronize(stream); if (nf) { // Typical pre-top down workflow. set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, new_frontier, vertex_degree.data().get(), nf, stream); traversal::exclusive_sum( frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We need mf cudaStreamSynchronize(stream); } break; case BOTTOMUP: bfs_kernels::fill_unvisited_queue(visited_bmap.data().get(), vertices_bmap_size, number_of_vertices, unvisited_queue, d_unvisited_cnt, stream, deterministic); size_last_unvisited_queue = nu; bfs_kernels::bottom_up_main(unvisited_queue, size_last_unvisited_queue, left_unvisited_queue, d_left_unvisited_cnt, visited_bmap.data().get(), row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); // The number of vertices left unvisited decreases // If it wasnt necessary last time, it wont be this time if (size_last_left_unvisited_queue) { cudaMemcpyAsync(&size_last_left_unvisited_queue, d_left_unvisited_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); CUDA_CHECK_LAST() // We need last_left_unvisited_size cudaStreamSynchronize(stream); bfs_kernels::bottom_up_large(left_unvisited_queue, size_last_left_unvisited_queue, visited_bmap.data().get(), row_offsets, col_indices, lvl, new_frontier, d_new_frontier_cnt, distances, predecessors, edge_mask, stream, deterministic); } cudaMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); CUDA_CHECK_LAST() // We will need nf cudaStreamSynchronize(stream); break; } // Updating undiscovered edges count nu -= nf; // Using new frontier frontier = new_frontier; growing = (nf > old_nf); ++lvl; } } template <typename IndexType> void BFS<IndexType>::resetDevicePointers() { 
cudaMemsetAsync(d_counters_pad.data().get(), 0, 4 * sizeof(IndexType), stream); } template <typename IndexType> void BFS<IndexType>::clean() { // the vectors have a destructor that takes care of cleaning } template class BFS<int>; } // namespace detail // NOTE: SP counter increase extremely fast on large graph // It can easily reach 1e40~1e70 on GAP-road.mtx template <typename VT, typename ET, typename WT> void bfs(experimental::GraphCSRView<VT, ET, WT> const &graph, VT *distances, VT *predecessors, double *sp_counters, const VT start_vertex, bool directed) { CUGRAPH_EXPECTS(typeid(VT) == typeid(int), "Unsupported vertex id data type, please use int"); CUGRAPH_EXPECTS(typeid(ET) == typeid(int), "Unsupported edge id data type, please use int"); CUGRAPH_EXPECTS((typeid(WT) == typeid(float)) || (typeid(WT) == typeid(double)), "Unsupported weight data type, please use float or double"); VT number_of_vertices = graph.number_of_vertices; ET number_of_edges = graph.number_of_edges; const VT *indices_ptr = graph.indices; const ET *offsets_ptr = graph.offsets; int alpha = 15; int beta = 18; // FIXME: Use VT and ET in the BFS detail cugraph::detail::BFS<VT> bfs( number_of_vertices, number_of_edges, offsets_ptr, indices_ptr, directed, alpha, beta); bfs.configure(distances, predecessors, sp_counters, nullptr); bfs.traverse(start_vertex); } template void bfs<int, int, float>(experimental::GraphCSRView<int, int, float> const &graph, int *distances, int *predecessors, double *sp_counters, const int source_vertex, bool directed); template void bfs<int, int, double>(experimental::GraphCSRView<int, int, double> const &graph, int *distances, int *predecessors, double *sp_counters, const int source_vertex, bool directed); } // namespace cugraph
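The BFS above is a direction-optimizing traversal (the code points to Beamer et al.'s scheme): it expands top-down while the frontier is cheap to expand and switches to bottom-up once the frontier's outgoing edges dominate the undiscovered edges. As a minimal host-side sketch of just the switching rule, using the same alpha/beta constants as the file — the helper name and integer widths here are illustrative, not cuGraph API:

enum bfs_state { STATE_TOPDOWN, STATE_BOTTOMUP };

// mf: edges leaving the current frontier, mu: edges still undiscovered,
// nf: vertices in the frontier, n: total vertices, growing: frontier grew last level.
bfs_state next_state(bfs_state s, long long mf, long long mu, long long nf,
                     long long n, bool growing, int alpha = 15, int beta = 18) {
  if (s == STATE_TOPDOWN && mf > mu / alpha) return STATE_BOTTOMUP;           // frontier is edge-heavy
  if (s == STATE_BOTTOMUP && !growing && nf < n / beta) return STATE_TOPDOWN; // frontier shrank again
  return s;
}

This mirrors the TOPDOWN/BOTTOMUP cases inside BFS<IndexType>::traverse; everything else (recomputing mf and mu when switching back to top-down) stays as in the file.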
a64f6d48e887215a15f0be03d2f065e575d94f78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Jacob Sword * Comparison of GPU summation methods **/ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <string> #include <cmath> #include <cassert> #include <cstdlib> #include <time.h> #include "./error_handler.h" #include "./wtime.h" using std::cout; using std::endl; // Linear CPU summation for baseline int sum_cpu(int *arr, int size) { int sum = 0; for (int i = 0; i < size; i++) sum += arr[i]; return sum; } // Every thread atomically adds its integers to global sum __global__ void sum_naive_kernel(int *arr, int size, int *sum) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < size) { atomicAdd(sum, arr[tid]); tid += num_threads; } } // Every thread gets local sum, shared mem used to get block sums. // Block sums atomically summed to total sum __global__ void sum_improved_atomic_kernel(int *arr, int size, int *sum) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each thread finds local sum of its assigned area int my_sum = 0; __shared__ int smem[128]; while (tid < size) { my_sum += arr[tid]; tid += num_threads; } // Load local sum into shared mem smem[threadIdx.x] = my_sum; // Barrier then use parallel reduction to get block sum __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2) { if (threadIdx.x < i) smem[threadIdx.x] += smem[threadIdx.x + i]; __syncthreads(); } // Block sum atomically added to global sum if (threadIdx.x == 0) { atomicAdd(sum, smem[0]); } } // Every thread gets local sum, smemm used to get block sums. __global__ void block_sum_kernel(int *arr, int size, int *block_sums) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each thread finds local sum of its assigned area int my_sum = 0; __shared__ int smem[128]; while (tid < size) { my_sum += arr[tid]; tid += num_threads; } smem[threadIdx.x] = my_sum; // Barrier then use parallel reduction to get block sum __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2) { if (threadIdx.x < i) { int temp = smem[threadIdx.x] + smem[threadIdx.x + i]; smem[threadIdx.x] = temp; } __syncthreads(); } // Block sum added to global arr if (threadIdx.x == 0) { block_sums[blockIdx.x] = smem[0]; } } int main(int argc, char **argv) { if (argc < 2) { cout << "Enter power for arr size " << endl; return 1; } int arr_size = 16 * pow(10, atoi(argv[1])); cout << "Using array size of " << arr_size << endl; int *arr = (int *) malloc(sizeof(int) * arr_size); srand(time(NULL)); // Initialize arr for (int i = 0; i < arr_size; i++) arr[i] = 1 + (rand() % 5); int *arr_d; HANDLE_ERR(hipMalloc((void **) &arr_d, sizeof (int) * arr_size)); HANDLE_ERR(hipMemcpy (arr_d, arr, sizeof (int) * arr_size, hipMemcpyHostToDevice)); int *sum_d; HANDLE_ERR(hipMalloc((void **) &sum_d, sizeof (int))); // CPU double starttime = wtime(); int cpu_sum = sum_cpu(arr, arr_size); double endtime = wtime(); double cpu_time = endtime - starttime; cout << "Time for cpu summation: " << cpu_time << endl; // NAIVE GPU std::string naive = "Naive GPU"; starttime = wtime(); hipLaunchKernelGGL(( sum_naive_kernel) , dim3(128), dim3(128) , 0, 0, arr_d, arr_size, sum_d); hipDeviceSynchronize(); endtime = wtime(); double naive_gpu_time = endtime - starttime; cout << "Time for " << naive << ": " << naive_gpu_time << endl; int sum; HANDLE_ERR(hipMemcpy (&sum, sum_d, sizeof (int), hipMemcpyDeviceToHost)); // Check sum assert(sum 
== cpu_sum); // IMPROVED GPU, using atomic add std::string improved_1 = "Improved GPU using atomic add"; // Reset device sum HANDLE_ERR(hipMemset(sum_d, 0, sizeof(int))); starttime = wtime(); hipLaunchKernelGGL(( sum_improved_atomic_kernel) , dim3(128), dim3(128) , 0, 0, arr_d, arr_size, sum_d); hipDeviceSynchronize(); endtime = wtime(); double improved_gpu_time = endtime - starttime; cout << "Time for " << improved_1 << ": " << improved_gpu_time << endl; // Check sum sum = 0; HANDLE_ERR(hipMemcpy (&sum, sum_d, sizeof (int), hipMemcpyDeviceToHost)); assert(sum == cpu_sum); // IMPROVED GPU using CPU add std::string improved_2 = "Improved GPU using CPU add"; // Create block sum arr int *block_sums_d; HANDLE_ERR(hipMalloc((void **) &block_sums_d, sizeof (int) * 128)); sum = 0; int *block_sums = (int *)malloc(sizeof(int) * 128); starttime = wtime(); hipLaunchKernelGGL(( block_sum_kernel) , dim3(128), dim3(128) , 0, 0, arr_d, arr_size, block_sums_d); hipDeviceSynchronize(); endtime = wtime(); HANDLE_ERR(hipMemcpy (block_sums, block_sums_d, sizeof (int) * 128, hipMemcpyDeviceToHost)); for (int i = 0; i < 128; i++) sum += block_sums[i]; double endtime_2 = wtime(); double improved_gpu_2_time = endtime - starttime; cout << "Time for " << improved_2 << " (before CPU sum): " << improved_gpu_2_time << endl; cout << "Time for " << improved_2 << " complete: " << endtime_2 - starttime << endl; // Check sum assert(sum == cpu_sum); }
a64f6d48e887215a15f0be03d2f065e575d94f78.cu
/** * Jacob Sword * Comparison of GPU summation methods **/ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <string> #include <cmath> #include <cassert> #include <cstdlib> #include <time.h> #include "./error_handler.h" #include "./wtime.h" using std::cout; using std::endl; // Linear CPU summation for baseline int sum_cpu(int *arr, int size) { int sum = 0; for (int i = 0; i < size; i++) sum += arr[i]; return sum; } // Every thread atomically adds its integers to global sum __global__ void sum_naive_kernel(int *arr, int size, int *sum) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; while (tid < size) { atomicAdd(sum, arr[tid]); tid += num_threads; } } // Every thread gets local sum, shared mem used to get block sums. // Block sums atomically summed to total sum __global__ void sum_improved_atomic_kernel(int *arr, int size, int *sum) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each thread finds local sum of its assigned area int my_sum = 0; __shared__ int smem[128]; while (tid < size) { my_sum += arr[tid]; tid += num_threads; } // Load local sum into shared mem smem[threadIdx.x] = my_sum; // Barrier then use parallel reduction to get block sum __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2) { if (threadIdx.x < i) smem[threadIdx.x] += smem[threadIdx.x + i]; __syncthreads(); } // Block sum atomically added to global sum if (threadIdx.x == 0) { atomicAdd(sum, smem[0]); } } // Every thread gets local sum, smemm used to get block sums. __global__ void block_sum_kernel(int *arr, int size, int *block_sums) { int num_threads = blockDim.x * gridDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; // Each thread finds local sum of its assigned area int my_sum = 0; __shared__ int smem[128]; while (tid < size) { my_sum += arr[tid]; tid += num_threads; } smem[threadIdx.x] = my_sum; // Barrier then use parallel reduction to get block sum __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2) { if (threadIdx.x < i) { int temp = smem[threadIdx.x] + smem[threadIdx.x + i]; smem[threadIdx.x] = temp; } __syncthreads(); } // Block sum added to global arr if (threadIdx.x == 0) { block_sums[blockIdx.x] = smem[0]; } } int main(int argc, char **argv) { if (argc < 2) { cout << "Enter power for arr size " << endl; return 1; } int arr_size = 16 * pow(10, atoi(argv[1])); cout << "Using array size of " << arr_size << endl; int *arr = (int *) malloc(sizeof(int) * arr_size); srand(time(NULL)); // Initialize arr for (int i = 0; i < arr_size; i++) arr[i] = 1 + (rand() % 5); int *arr_d; HANDLE_ERR(cudaMalloc((void **) &arr_d, sizeof (int) * arr_size)); HANDLE_ERR(cudaMemcpy (arr_d, arr, sizeof (int) * arr_size, cudaMemcpyHostToDevice)); int *sum_d; HANDLE_ERR(cudaMalloc((void **) &sum_d, sizeof (int))); // CPU double starttime = wtime(); int cpu_sum = sum_cpu(arr, arr_size); double endtime = wtime(); double cpu_time = endtime - starttime; cout << "Time for cpu summation: " << cpu_time << endl; // NAIVE GPU std::string naive = "Naive GPU"; starttime = wtime(); sum_naive_kernel <<< 128, 128 >>> (arr_d, arr_size, sum_d); cudaDeviceSynchronize(); endtime = wtime(); double naive_gpu_time = endtime - starttime; cout << "Time for " << naive << ": " << naive_gpu_time << endl; int sum; HANDLE_ERR(cudaMemcpy (&sum, sum_d, sizeof (int), cudaMemcpyDeviceToHost)); // Check sum assert(sum == cpu_sum); // IMPROVED GPU, using atomic add std::string improved_1 = "Improved GPU using atomic add"; // Reset 
device sum HANDLE_ERR(cudaMemset(sum_d, 0, sizeof(int))); starttime = wtime(); sum_improved_atomic_kernel <<< 128, 128 >>> (arr_d, arr_size, sum_d); cudaDeviceSynchronize(); endtime = wtime(); double improved_gpu_time = endtime - starttime; cout << "Time for " << improved_1 << ": " << improved_gpu_time << endl; // Check sum sum = 0; HANDLE_ERR(cudaMemcpy (&sum, sum_d, sizeof (int), cudaMemcpyDeviceToHost)); assert(sum == cpu_sum); // IMPROVED GPU using CPU add std::string improved_2 = "Improved GPU using CPU add"; // Create block sum arr int *block_sums_d; HANDLE_ERR(cudaMalloc((void **) &block_sums_d, sizeof (int) * 128)); sum = 0; int *block_sums = (int *)malloc(sizeof(int) * 128); starttime = wtime(); block_sum_kernel <<< 128, 128 >>> (arr_d, arr_size, block_sums_d); cudaDeviceSynchronize(); endtime = wtime(); HANDLE_ERR(cudaMemcpy (block_sums, block_sums_d, sizeof (int) * 128, cudaMemcpyDeviceToHost)); for (int i = 0; i < 128; i++) sum += block_sums[i]; double endtime_2 = wtime(); double improved_gpu_2_time = endtime - starttime; cout << "Time for " << improved_2 << " (before CPU sum): " << improved_gpu_2_time << endl; cout << "Time for " << improved_2 << " complete: " << endtime_2 - starttime << endl; // Check sum assert(sum == cpu_sum); }
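Both improved kernels above reduce within a block through shared memory before a single atomicAdd or per-block store. A common further refinement, not used in this benchmark, is to finish the reduction with warp shuffles, which needs no shared memory or __syncthreads() within a warp. A minimal sketch, assuming blockDim.x is a multiple of 32 and a GPU that supports __shfl_down_sync:

__inline__ __device__ int warp_reduce_sum(int val) {
    // Each step folds the upper half of the warp onto the lower half.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;  // lane 0 ends up holding the warp's sum
}

Each warp's lane 0 would then write its partial sum to a small shared array, and the first warp repeats the same shuffle reduction over those partials.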
87bfa0f5b8b8aa4a58e405462b6c77239ac035d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "scalar.h" __device__ float op(float d1,float d2,float *params) { return d2 - d1; } extern "C" __global__ void sub_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) { transform(n,idx,dx,dy,incy,params,result); }
87bfa0f5b8b8aa4a58e405462b6c77239ac035d5.cu
#include "scalar.h" __device__ float op(float d1,float d2,float *params) { return d2 - d1; } extern "C" __global__ void sub_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) { transform(n,idx,dx,dy,incy,params,result); }
aa6b7b2edbc2c7d0377a814173d5319cd3926011.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "create_fpr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *tpr = NULL; hipMalloc(&tpr, XSIZE*YSIZE); const int *unique_index = NULL; hipMalloc(&unique_index, XSIZE*YSIZE); float *fpr = NULL; hipMalloc(&fpr, XSIZE*YSIZE); int num_selected = 1; int num_total = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( create_fpr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, tpr,unique_index,fpr,num_selected,num_total); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( create_fpr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, tpr,unique_index,fpr,num_selected,num_total); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( create_fpr_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, tpr,unique_index,fpr,num_selected,num_total); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
aa6b7b2edbc2c7d0377a814173d5319cd3926011.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "create_fpr_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *tpr = NULL; cudaMalloc(&tpr, XSIZE*YSIZE); const int *unique_index = NULL; cudaMalloc(&unique_index, XSIZE*YSIZE); float *fpr = NULL; cudaMalloc(&fpr, XSIZE*YSIZE); int num_selected = 1; int num_total = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); create_fpr_kernel<<<gridBlock,threadBlock>>>(tpr,unique_index,fpr,num_selected,num_total); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { create_fpr_kernel<<<gridBlock,threadBlock>>>(tpr,unique_index,fpr,num_selected,num_total); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { create_fpr_kernel<<<gridBlock,threadBlock>>>(tpr,unique_index,fpr,num_selected,num_total); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
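One caveat about the timing above: kernel launches are asynchronous, and neither the warm-up loop nor the 1000-launch loop synchronizes before steady_clock::now() is read, so the reported interval can mix launch overhead with however much execution the launch queue happens to absorb. A hedged alternative, not part of the original benchmark, is to bracket the loop with CUDA events:

cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++)
    create_fpr_kernel<<<gridBlock, threadBlock>>>(tpr, unique_index, fpr, num_selected, num_total);
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);                         // wait until the recorded work has finished
float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, ev_start, ev_stop);  // GPU time across all 1000 launches, in ms

The hipified counterpart above would use the hipEvent_* equivalents.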
e3d0ce5c9bb8da656d14c3650c8c01698a9e7558.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <iostream> #include <selection/knn.cuh> #include <vector> #include "test_utils.h" namespace MLCommon { namespace Selection { /** * * NOTE: Not exhaustively testing the kNN implementation since * we are using FAISS for this. Just testing API to verify the * knn.cu class is accepting inputs and providing outputs as * expected. */ template <typename T> class KNNTest : public ::testing::Test { protected: void basicTest() { auto alloc = std::make_shared<raft::mr::device::default_allocator>(); // Allocate input allocate(d_train_inputs, n * d); // Allocate reference arrays allocate<long>(d_ref_I, n * n); allocate(d_ref_D, n * n); // Allocate predicted arrays allocate<long>(d_pred_I, n * n); allocate(d_pred_D, n * n); // make testdata on host std::vector<T> h_train_inputs = {1.0, 50.0, 51.0}; h_train_inputs.resize(n); updateDevice(d_train_inputs, h_train_inputs.data(), n * d, 0); std::vector<T> h_res_D = {0.0, 49.0, 50.0, 0.0, 1.0, 49.0, 0.0, 1.0, 50.0}; h_res_D.resize(n * n); updateDevice(d_ref_D, h_res_D.data(), n * n, 0); std::vector<long> h_res_I = {0, 1, 2, 1, 2, 0, 2, 1, 0}; h_res_I.resize(n * n); updateDevice<long>(d_ref_I, h_res_I.data(), n * n, 0); std::vector<float *> input_vec = {d_train_inputs}; std::vector<int> sizes_vec = {n}; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); brute_force_knn(input_vec, sizes_vec, d, d_train_inputs, n, d_pred_I, d_pred_D, n, alloc, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void SetUp() override { basicTest(); } void TearDown() override { CUDA_CHECK(hipFree(d_train_inputs)); CUDA_CHECK(hipFree(d_pred_I)); CUDA_CHECK(hipFree(d_pred_D)); CUDA_CHECK(hipFree(d_ref_I)); CUDA_CHECK(hipFree(d_ref_D)); } protected: T *d_train_inputs; int n = 3; int d = 1; long *d_pred_I; T *d_pred_D; long *d_ref_I; T *d_ref_D; }; typedef KNNTest<float> KNNTestF; TEST_F(KNNTestF, Fit) { ASSERT_TRUE( devArrMatch(d_ref_D, d_pred_D, n * n, CompareApprox<float>(1e-3))); ASSERT_TRUE(devArrMatch(d_ref_I, d_pred_I, n * n, Compare<long>())); } }; // end namespace Selection }; // namespace MLCommon
e3d0ce5c9bb8da656d14c3650c8c01698a9e7558.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <cuda_utils.cuh> #include <iostream> #include <selection/knn.cuh> #include <vector> #include "test_utils.h" namespace MLCommon { namespace Selection { /** * * NOTE: Not exhaustively testing the kNN implementation since * we are using FAISS for this. Just testing API to verify the * knn.cu class is accepting inputs and providing outputs as * expected. */ template <typename T> class KNNTest : public ::testing::Test { protected: void basicTest() { auto alloc = std::make_shared<raft::mr::device::default_allocator>(); // Allocate input allocate(d_train_inputs, n * d); // Allocate reference arrays allocate<long>(d_ref_I, n * n); allocate(d_ref_D, n * n); // Allocate predicted arrays allocate<long>(d_pred_I, n * n); allocate(d_pred_D, n * n); // make testdata on host std::vector<T> h_train_inputs = {1.0, 50.0, 51.0}; h_train_inputs.resize(n); updateDevice(d_train_inputs, h_train_inputs.data(), n * d, 0); std::vector<T> h_res_D = {0.0, 49.0, 50.0, 0.0, 1.0, 49.0, 0.0, 1.0, 50.0}; h_res_D.resize(n * n); updateDevice(d_ref_D, h_res_D.data(), n * n, 0); std::vector<long> h_res_I = {0, 1, 2, 1, 2, 0, 2, 1, 0}; h_res_I.resize(n * n); updateDevice<long>(d_ref_I, h_res_I.data(), n * n, 0); std::vector<float *> input_vec = {d_train_inputs}; std::vector<int> sizes_vec = {n}; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); brute_force_knn(input_vec, sizes_vec, d, d_train_inputs, n, d_pred_I, d_pred_D, n, alloc, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void SetUp() override { basicTest(); } void TearDown() override { CUDA_CHECK(cudaFree(d_train_inputs)); CUDA_CHECK(cudaFree(d_pred_I)); CUDA_CHECK(cudaFree(d_pred_D)); CUDA_CHECK(cudaFree(d_ref_I)); CUDA_CHECK(cudaFree(d_ref_D)); } protected: T *d_train_inputs; int n = 3; int d = 1; long *d_pred_I; T *d_pred_D; long *d_ref_I; T *d_ref_D; }; typedef KNNTest<float> KNNTestF; TEST_F(KNNTestF, Fit) { ASSERT_TRUE( devArrMatch(d_ref_D, d_pred_D, n * n, CompareApprox<float>(1e-3))); ASSERT_TRUE(devArrMatch(d_ref_I, d_pred_I, n * n, Compare<long>())); } }; // end namespace Selection }; // namespace MLCommon
44ed8b2d7decd624cf740b51b33ac0192a582573.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void kernelForce(int NA, double* FFX, double* FFY, double* FFZ, double* EE, double* X, double* Y, double* Z, int IPBC, double *Params) { double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2; double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ; double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ; double PP0 = Params[0]; double PP1 = Params[1]; double PP2 = Params[2]; double AL1 = Params[3]; double AL2 = Params[4]; double A1 = Params[5]; double A2 = Params[6]; double RL1 = Params[7]; double RL2 = Params[8]; double D21 = Params[9]; double D22 = Params[10]; int i = blockIdx.x * blockDim.x + threadIdx.x; int bx = blockDim.x; __shared__ double coor[900]; int j = threadIdx.x; while(j < NA){ coor[j] = X[j]; coor[j+NA] = Y[j]; coor[j+2*NA] = Z[j]; j += bx; } __syncthreads(); double Xi = coor[i]; double Yi = coor[i+NA]; double Zi = coor[i+2*NA]; EPP = 0; // Forces that effect atoms indexed with i in all three axes FX2 = 0; FY2 = 0; FZ2 = 0; for(j=0; j<NA; j++) { if(i != j){ // Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ XIJ = Xi - coor[j]; YIJ = Yi - coor[j+NA]; ZIJ = Zi - coor[j+2*NA]; double DD, ID; if(IPBC != 0){ /* Uncomment if there is periodic boundary in x or y*/ if(PP0 > 0){ DD = XIJ / PP0; ID = int(DD); XIJ = XIJ - PP0*(ID+int(2.0*(DD-ID))); } if(PP1 > 0){ DD = YIJ / PP1; ID = int(DD); YIJ = YIJ - PP1*(ID+int(2.0*(DD-ID))); } /**/ if(PP2 > 0){ DD = ZIJ / PP2; ID = int(DD); ZIJ = ZIJ - PP2*(ID+int(2.0*(DD-ID))); } } RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ; RIJ = sqrt(RIJ2); // Calculate potential energy U(r) ARG1 = AL1*RIJ2; ARG2 = AL2*RIJ2; EXP1 = exp(-ARG1); EXP2 = exp(-ARG2); UIJ1 = A1*EXP1/(pow(RIJ,RL1)); UIJ2 = A2*EXP2/(pow(RIJ,RL2)); UIJ = D21*UIJ1 + D22*UIJ2; EPP = EPP+UIJ; // Calculate forces FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ); FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ); FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2; XRIJ = XIJ/RIJ; YRIJ = YIJ/RIJ; ZRIJ = ZIJ/RIJ; FX2 += FAC12*XRIJ; FY2 += FAC12*YRIJ; FZ2 += FAC12*ZRIJ; } } FFX[i] = -FX2; FFY[i] = -FY2; FFZ[i] = -FZ2; EE[i] = EPP; }
44ed8b2d7decd624cf740b51b33ac0192a582573.cu
__global__ void kernelForce(int NA, double* FFX, double* FFY, double* FFZ, double* EE, double* X, double* Y, double* Z, int IPBC, double *Params) { double XIJ, YIJ, ZIJ, RIJ, RIJ2, EPP, FX2, FY2, FZ2; double ARG1, ARG2, EXP1, EXP2, UIJ1, UIJ2, UIJ; double FAC1, FAC2, FAC12, XRIJ, YRIJ, ZRIJ; double PP0 = Params[0]; double PP1 = Params[1]; double PP2 = Params[2]; double AL1 = Params[3]; double AL2 = Params[4]; double A1 = Params[5]; double A2 = Params[6]; double RL1 = Params[7]; double RL2 = Params[8]; double D21 = Params[9]; double D22 = Params[10]; int i = blockIdx.x * blockDim.x + threadIdx.x; int bx = blockDim.x; __shared__ double coor[900]; int j = threadIdx.x; while(j < NA){ coor[j] = X[j]; coor[j+NA] = Y[j]; coor[j+2*NA] = Z[j]; j += bx; } __syncthreads(); double Xi = coor[i]; double Yi = coor[i+NA]; double Zi = coor[i+2*NA]; EPP = 0; // Forces that effect atoms indexed with i in all three axes FX2 = 0; FY2 = 0; FZ2 = 0; for(j=0; j<NA; j++) { if(i != j){ // Apply periodic boundaries and find distances between atom I and j. RIJ2 is square of RIJ XIJ = Xi - coor[j]; YIJ = Yi - coor[j+NA]; ZIJ = Zi - coor[j+2*NA]; double DD, ID; if(IPBC != 0){ /* Uncomment if there is periodic boundary in x or y*/ if(PP0 > 0){ DD = XIJ / PP0; ID = int(DD); XIJ = XIJ - PP0*(ID+int(2.0*(DD-ID))); } if(PP1 > 0){ DD = YIJ / PP1; ID = int(DD); YIJ = YIJ - PP1*(ID+int(2.0*(DD-ID))); } /**/ if(PP2 > 0){ DD = ZIJ / PP2; ID = int(DD); ZIJ = ZIJ - PP2*(ID+int(2.0*(DD-ID))); } } RIJ2 = XIJ*XIJ + YIJ*YIJ + ZIJ*ZIJ; RIJ = sqrt(RIJ2); // Calculate potential energy U(r) ARG1 = AL1*RIJ2; ARG2 = AL2*RIJ2; EXP1 = exp(-ARG1); EXP2 = exp(-ARG2); UIJ1 = A1*EXP1/(pow(RIJ,RL1)); UIJ2 = A2*EXP2/(pow(RIJ,RL2)); UIJ = D21*UIJ1 + D22*UIJ2; EPP = EPP+UIJ; // Calculate forces FAC1 = -(RL1/RIJ + 2.0*AL1*RIJ); FAC2 = -(RL2/RIJ + 2.0*AL2*RIJ); FAC12 = FAC1*D21*UIJ1 + FAC2*D22*UIJ2; XRIJ = XIJ/RIJ; YRIJ = YIJ/RIJ; ZRIJ = ZIJ/RIJ; FX2 += FAC12*XRIJ; FY2 += FAC12*YRIJ; FZ2 += FAC12*ZRIJ; } } FFX[i] = -FX2; FFY[i] = -FY2; FFZ[i] = -FZ2; EE[i] = EPP; }
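For reference, the analytic form behind FAC1/FAC2 in kernelForce (mapping code names to symbols: A1, A2 → A_k; AL1, AL2 → a_k; RL1, RL2 → λ_k; D21, D22 → D_k): each pair term is a screened power-law potential whose radial derivative factors exactly as in the code,

\[
U_k(r) = A_k \, \frac{e^{-a_k r^2}}{r^{\lambda_k}}, \qquad
\frac{dU_k}{dr} = -\left(\frac{\lambda_k}{r} + 2 a_k r\right) U_k(r), \qquad
U(r) = D_1 U_1(r) + D_2 U_2(r),
\]

so FAC12 is dU/dr for one pair, and the Cartesian force on atom i is accumulated as FFX[i] = −Σ_j FAC12 · (XIJ/RIJ), i.e. F_x = −(dU/dr)(x_ij/r) summed over neighbours j, and likewise for y and z.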
29e29772d3f4d8c60cf9fea0b8807c6bcf95843d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // softmax.cu #include <algorithm> #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/softmax.hpp> #include <nbla/cuda/limits.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_softmax_forward(const int size0x2_, const int size1_, const int size2_, const T *x, T *y) { NBLA_CUDA_KERNEL_LOOP(idx, size0x2_) { const int i0 = idx / size2_; const int i2 = idx % size2_; // compute maximum T max_x = nbla::numeric_limits_cuda<T>::min(); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; max_x = max(max_x, x[k]); } // Compute exponential and sum T exp_sum = T(0); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; const T tmp = exp(x[k] - max_x); y[k] = tmp; exp_sum += tmp; } // Compute softmax for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; y[k] = y[k] / exp_sum; } } } template <typename T, bool accum> __global__ void kernel_softmax_backward(const int size0x2_, const int size1_, const int size2_, const T *y, const T *dy, T *dx) { NBLA_CUDA_KERNEL_LOOP(idx, size0x2_) { const int i0 = idx / size2_; const int i2 = idx % size2_; // compute sum of dy * y T dyy_sum = T(0); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; dyy_sum += dy[k] * y[k]; } // Compute backward for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; dx[k] = (accum ? dx[k] : (T)0) + y[k] * (dy[k] - dyy_sum); } } } template <class T> void SoftmaxCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); // Setting up variables const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_softmax_forward, this->size0_ * this->size2_, this->size1_, this->size2_, x, y); } template <class T> void SoftmaxCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(std::stoi(this->ctx_.device_id)); // Setting up variables const Tc *y = outputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_softmax_backward<Tc, true>), this->size0_ * this->size2_, this->size1_, this->size2_, y, dy, dx); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_softmax_backward<Tc, false>), this->size0_ * this->size2_, this->size1_, this->size2_, y, dy, dx); } } }
29e29772d3f4d8c60cf9fea0b8807c6bcf95843d.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // softmax.cu #include <algorithm> #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/softmax.hpp> #include <nbla/cuda/limits.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_softmax_forward(const int size0x2_, const int size1_, const int size2_, const T *x, T *y) { NBLA_CUDA_KERNEL_LOOP(idx, size0x2_) { const int i0 = idx / size2_; const int i2 = idx % size2_; // compute maximum T max_x = nbla::numeric_limits_cuda<T>::min(); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; max_x = max(max_x, x[k]); } // Compute exponential and sum T exp_sum = T(0); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; const T tmp = exp(x[k] - max_x); y[k] = tmp; exp_sum += tmp; } // Compute softmax for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; y[k] = y[k] / exp_sum; } } } template <typename T, bool accum> __global__ void kernel_softmax_backward(const int size0x2_, const int size1_, const int size2_, const T *y, const T *dy, T *dx) { NBLA_CUDA_KERNEL_LOOP(idx, size0x2_) { const int i0 = idx / size2_; const int i2 = idx % size2_; // compute sum of dy * y T dyy_sum = T(0); for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; dyy_sum += dy[k] * y[k]; } // Compute backward for (int i1 = 0; i1 < size1_; ++i1) { const int k = (i0 * size1_ + i1) * size2_ + i2; dx[k] = (accum ? dx[k] : (T)0) + y[k] * (dy[k] - dyy_sum); } } } template <class T> void SoftmaxCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); // Setting up variables const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_softmax_forward, this->size0_ * this->size2_, this->size1_, this->size2_, x, y); } template <class T> void SoftmaxCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!propagate_down[0]) { return; } cuda_set_device(std::stoi(this->ctx_.device_id)); // Setting up variables const Tc *y = outputs[0]->get_data_pointer<Tc>(this->ctx_); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_softmax_backward<Tc, true>), this->size0_ * this->size2_, this->size1_, this->size2_, y, dy, dx); } else { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_softmax_backward<Tc, false>), this->size0_ * this->size2_, this->size1_, this->size2_, y, dy, dx); } } }
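kernel_softmax_forward above subtracts the per-slice maximum before exponentiating, which is the standard numerically stable softmax:

\[
\operatorname{softmax}(x)_i = \frac{e^{x_i - m}}{\sum_j e^{x_j - m}}, \qquad m = \max_j x_j .
\]

The shift cancels between numerator and denominator, so the result is unchanged, but the exponentials stay bounded and cannot overflow for large activations.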
889fadd5b55f2c215f895056e110fa0571c8f47b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Writen by https://github.com/myownskyW7. All Rights Reserved. */ #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <THH/THHAtomics.cuh> #include <cmath> using namespace at; #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define MAXIMIZE_KERNEL_SIZE true #define kTileDim 32 #define kBlockRows 8 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t WARP_SHFL_DOWN(scalar_t val, int offset) { return __shfl_down_sync(FULL_MASK, val, offset); } template<> __device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half val, int offset) { return c10::Half(WARP_SHFL_DOWN<unsigned short>(val.x, offset), c10::Half::from_bits_t{}); } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) // val += __shfl_down_sync(FULL_MASK, val, offset); val += WARP_SHFL_DOWN(val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. 
// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void CARAFEForward( const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ top_data) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int start_w = down_pw - (kernel_size - 1) / 2; const int end_w = down_pw + (kernel_size - 1) / 2 + 1; const int start_h = down_ph - (kernel_size - 1) / 2; const int end_h = down_ph + (kernel_size - 1) / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { int mask_index = Loc2Index(n, ph, pw, c, height, width, mask_channels); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy++) { #pragma unroll for (int ix = start_w; ix < end_w; ix++) { if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { continue; } int mask_iy = iy - down_ph + (kernel_size - 1) / 2; int mask_ix = ix - down_pw + (kernel_size - 1) / 2; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, down_height, down_width, channels); output_val += bottom_data[feat_index] * shared_mask[mask_c * WARP_SIZE + pixel_id]; } } int top_index = Loc2Index(n, ph, pw, c, height, width, channels); top_data[top_index] = output_val; } } int CARAFEForwardLaucher(const at::Tensor features, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rfeatures, 
at::Tensor routput, at::Tensor rmasks, at::Tensor output) { // one warp per pixel hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NCHW2NHWC_Feature", ([&] { const scalar_t *bottom_data = features.data_ptr<scalar_t>(); scalar_t *top_data = rfeatures.data_ptr<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(input_height * input_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, channels, input_height * input_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NCHW2NHWC_Masks", ([&] { const scalar_t *bottom_data = masks.data_ptr<scalar_t>(); scalar_t *top_data = rmasks.data_ptr<scalar_t>(); const int dh = divideUP(mask_channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, mask_channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "CARAFELaucherForward", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *bottom_data = rfeatures.data_ptr<scalar_t>(); const scalar_t *bottom_masks = rmasks.data_ptr<scalar_t>(); scalar_t *top_data = routput.data_ptr<scalar_t>(); hipLaunchKernelGGL(( CARAFEForward<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, bottom_data, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NHWC2NCHW", ([&] { const scalar_t *bottom_data = routput.data_ptr<scalar_t>(); scalar_t *top_data = output.data_ptr<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, output_height * output_width, channels, dh, dw, bottom_data, top_data); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void CARAFEBackward_Feature( const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ bottom_diff) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int start_w = pw - 
(kernel_size - 1) * scale_factor / 2; const int end_w = pw + (kernel_size - 1) * scale_factor / 2 + 1; const int start_h = ph - (kernel_size - 1) * scale_factor / 2; const int end_h = ph + (kernel_size - 1) * scale_factor / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { const int mask_w = (c % kernel_size) * scale_factor; const int mask_h = (c / kernel_size % kernel_size) * scale_factor; const int mask_x = start_w + mask_w; const int mask_y = start_h + mask_h; if (mask_y < 0 || mask_y > height - 1 || mask_x < 0 || mask_x > width - 1) { shared_mask[c * WARP_SIZE + pixel_id] = 0; continue; } const int mask_group = c / (kernel_size * kernel_size); const int mask_c = (2 * mask_group + 1) * kernel_size * kernel_size - c - 1; int mask_index = Loc2Index(n, mask_c, mask_y, mask_x, mask_channels, height, width); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; int top_index = Loc2Index(n, ph, pw, c, height, width, channels); scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy += scale_factor) { #pragma unroll for (int ix = start_w; ix < end_w; ix += scale_factor) { if (iy < 0 || iy > height - 1 || ix < 0 || ix > width - 1) { continue; } int mask_iy = (iy - ph + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_ix = (ix - pw + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, height, width, channels); output_val += shared_mask[mask_c * WARP_SIZE + pixel_id] * top_diff[feat_index]; } } bottom_diff[top_index] = output_val; } } template <typename scalar_t> __global__ void FeatureSum(const int num_kernels, const scalar_t *__restrict__ input_data, const int scale_factor, const int channels, const int height, const int width, scalar_t *__restrict__ output_data) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { scalar_t output_val = 0; for (int iy = ph * scale_factor; iy < (ph + 1) * scale_factor; iy++) { for (int ix = pw * scale_factor; ix < (pw + 1) * scale_factor; ix++) { int input_id = Loc2Index(n, iy, ix, c, height * scale_factor, width * scale_factor, channels); output_val += input_data[input_id]; } } const int output_id = Loc2Index(n, ph, pw, c, height, width, channels); output_data[output_id] = output_val; } } template <typename scalar_t> __global__ void CARAFEBackward_Mask(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ mask_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int mask_c = index % mask_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / mask_channels; const int pw = 
index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int mask_group = mask_c / (kernel_size * kernel_size); const int mask_loc = mask_c % (kernel_size * kernel_size); const int offset_x = mask_loc % kernel_size - (kernel_size - 1) / 2; const int offset_y = mask_loc / kernel_size % kernel_size - (kernel_size - 1) / 2; const int down_x = down_pw + offset_x; const int down_y = down_ph + offset_y; scalar_t output_val = 0; if (down_y >= 0 && down_y <= down_height - 1 && down_x >= 0 && down_x <= down_width - 1) { const int channels_per_mask = ceilf(channels / (float)group_size); const int start = channels_per_mask * mask_group; const int end = min(channels_per_mask * (mask_group + 1), channels); for (int c = start + lane_id; c < end; c += WARP_SIZE) { int bottom_id = Loc2Index(n, down_y, down_x, c, down_height, down_width, channels); int top_id = Loc2Index(n, ph, pw, c, height, width, channels); output_val += top_diff[top_id] * bottom_data[bottom_id]; } } __syncwarp(); output_val = warpReduceSum(output_val); if (lane_id == 0) { const int mask_id = Loc2Index(n, ph, pw, mask_c, height, width, mask_channels); mask_diff[mask_id] = output_val; } } int CARAFEBackwardLaucher(const at::Tensor top_grad, const at::Tensor rfeatures, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rtop_grad, at::Tensor rbottom_grad_hs, at::Tensor rbottom_grad, at::Tensor rmask_grad, at::Tensor bottom_grad, at::Tensor mask_grad) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data_ptr<scalar_t>(); scalar_t *top_data = rtop_grad.data_ptr<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFELaucherBackward_Feature", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data_ptr<scalar_t>(); const scalar_t *bottom_masks = masks.data_ptr<scalar_t>(); scalar_t *bottom_diff = rbottom_grad_hs.data_ptr<scalar_t>(); hipLaunchKernelGGL(( CARAFEBackward_Feature<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "FeatureSum", ([&] { const int num_kernels = batch_size * input_height * input_width * THREADS_PER_PIXEL; const scalar_t *bottom_diff_hs = rbottom_grad_hs.data_ptr<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data_ptr<scalar_t>(); hipLaunchKernelGGL(( FeatureSum<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, bottom_diff_hs, scale_factor, 
channels, input_height, input_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data_ptr<scalar_t>(); scalar_t *top_data = bottom_grad.data_ptr<scalar_t>(); const int dh = divideUP(input_height * input_width, kTileDim); const int dw = divideUP(channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, input_height * input_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFELaucherBackward_Mask", ([&] { const int num_kernels = batch_size * output_height * output_width * mask_channels * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data_ptr<scalar_t>(); const scalar_t *bottom_data = rfeatures.data_ptr<scalar_t>(); scalar_t *mask_diff = rmask_grad.data_ptr<scalar_t>(); hipLaunchKernelGGL(( CARAFEBackward_Mask<scalar_t>) , dim3(at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK), 0, stream, num_kernels, top_diff, bottom_data, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, mask_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NHWC2NCHW_Mask_Grad", ([&] { const scalar_t *bottom_data = rmask_grad.data_ptr<scalar_t>(); scalar_t *top_data = mask_grad.data_ptr<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(mask_channels, kTileDim); hipLaunchKernelGGL(( BatchTranspose2DCUDAKernel<scalar_t>) , dim3(batch_size * dh * dw), dim3(dim3(kTileDim, kBlockRows)), 0, stream, batch_size, output_height * output_width, mask_channels, dh, dw, bottom_data, top_data); })); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
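// ---------------------------------------------------------------------------
// Added note (not part of the original source): CARAFEBackwardLaucher above
// chains six kernel launches per call:
//   1. BatchTranspose2DCUDAKernel  - top_grad  NCHW -> NHWC          (rtop_grad)
//   2. CARAFEBackward_Feature      - route rtop_grad back through the
//                                    reassembly masks at output resolution
//                                    (rbottom_grad_hs)
//   3. FeatureSum                  - sum each scale_factor x scale_factor
//                                    window down to input resolution
//                                    (rbottom_grad)
//   4. BatchTranspose2DCUDAKernel  - rbottom_grad NHWC -> NCHW       (bottom_grad)
//   5. CARAFEBackward_Mask         - per-mask-channel dot product of rtop_grad
//                                    with rfeatures, reduced across one warp
//                                    (rmask_grad)
//   6. BatchTranspose2DCUDAKernel  - rmask_grad NHWC -> NCHW         (mask_grad)
// Launch failures are only surfaced once, via hipGetLastError() at the end.
// ---------------------------------------------------------------------------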
889fadd5b55f2c215f895056e110fa0571c8f47b.cu
/* Writen by https://github.com/myownskyW7. All Rights Reserved. */ #include <ATen/ATen.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <THC/THCAtomics.cuh> #include <cmath> using namespace at; #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 // 32 * 32 #define WARP_SIZE 32 #define THREADS_PER_PIXEL 32 #define MAX_SHARED_MEMORY 49152 #define MAX_SHARED_SCALAR_T 6144 // 49152 / 8 = 6144 #define MAXIMIZE_KERNEL_SIZE true #define kTileDim 32 #define kBlockRows 8 #define FULL_MASK 0xffffffff inline int divideUP(const int x, const int y) { return (((x) + (y)-1) / (y)); } __device__ inline int Loc2Index(const int n, const int c, const int h, const int w, const int channel_num, const int height, const int width) { int index = w + (h + (c + n * channel_num) * height) * width; return index; } /* TODO: move this to a common place */ template <typename scalar_t> __device__ inline scalar_t min(scalar_t a, scalar_t b) { return a < b ? a : b; } template <typename scalar_t> __device__ inline scalar_t max(scalar_t a, scalar_t b) { return a > b ? a : b; } template <typename scalar_t> __device__ __forceinline__ scalar_t WARP_SHFL_DOWN(scalar_t val, int offset) { return __shfl_down_sync(FULL_MASK, val, offset); } template<> __device__ __forceinline__ c10::Half WARP_SHFL_DOWN<c10::Half>(c10::Half val, int offset) { return c10::Half(WARP_SHFL_DOWN<unsigned short>(val.x, offset), c10::Half::from_bits_t{}); } template <typename scalar_t> __device__ __forceinline__ scalar_t warpReduceSum(scalar_t val) { for (int offset = 16; offset > 0; offset /= 2) // val += __shfl_down_sync(FULL_MASK, val, offset); val += WARP_SHFL_DOWN(val, offset); return val; } // Splits the original matrix into submatrices with size 32 * 32. // Each block transposes one submatrix by loading it into shared memory. 
// Reference https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/ template <typename scalar_t> __global__ void BatchTranspose2DCUDAKernel(const int N, const int H, const int W, const int dh, const int dw, const scalar_t *__restrict__ X, scalar_t *__restrict__ Y) { __shared__ scalar_t tile[kTileDim][kTileDim + 1]; const int n = blockIdx.x / (dh * dw); const int k = blockIdx.x % (dh * dw); const int r = k / dw; const int c = k % dw; const int offset = n * H * W; int x = c * kTileDim + threadIdx.x; int y = r * kTileDim + threadIdx.y; if (x < W) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < H; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = X[offset + (y + i) * W + x]; } } __syncthreads(); x = r * kTileDim + threadIdx.x; y = c * kTileDim + threadIdx.y; if (x < H) { for (int i = 0; threadIdx.y + i < kTileDim && y + i < W; i += kBlockRows) { Y[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } } template <typename scalar_t> __global__ void CARAFEForward( const int num_kernels, const scalar_t *__restrict__ bottom_data, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ top_data) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; const int down_ph = ph / scale_factor; const int start_w = down_pw - (kernel_size - 1) / 2; const int end_w = down_pw + (kernel_size - 1) / 2 + 1; const int start_h = down_ph - (kernel_size - 1) / 2; const int end_h = down_ph + (kernel_size - 1) / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { int mask_index = Loc2Index(n, ph, pw, c, height, width, mask_channels); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy++) { #pragma unroll for (int ix = start_w; ix < end_w; ix++) { if (iy < 0 || iy > down_height - 1 || ix < 0 || ix > down_width - 1) { continue; } int mask_iy = iy - down_ph + (kernel_size - 1) / 2; int mask_ix = ix - down_pw + (kernel_size - 1) / 2; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, down_height, down_width, channels); output_val += bottom_data[feat_index] * shared_mask[mask_c * WARP_SIZE + pixel_id]; } } int top_index = Loc2Index(n, ph, pw, c, height, width, channels); top_data[top_index] = output_val; } } int CARAFEForwardLaucher(const at::Tensor features, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rfeatures, 
at::Tensor routput, at::Tensor rmasks, at::Tensor output) { // one warp per pixel cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NCHW2NHWC_Feature", ([&] { const scalar_t *bottom_data = features.data_ptr<scalar_t>(); scalar_t *top_data = rfeatures.data_ptr<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(input_height * input_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, channels, input_height * input_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NCHW2NHWC_Masks", ([&] { const scalar_t *bottom_data = masks.data_ptr<scalar_t>(); scalar_t *top_data = rmasks.data_ptr<scalar_t>(); const int dh = divideUP(mask_channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, mask_channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "CARAFELaucherForward", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *bottom_data = rfeatures.data_ptr<scalar_t>(); const scalar_t *bottom_masks = rmasks.data_ptr<scalar_t>(); scalar_t *top_data = routput.data_ptr<scalar_t>(); CARAFEForward<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, bottom_data, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "NHWC2NCHW", ([&] { const scalar_t *bottom_data = routput.data_ptr<scalar_t>(); scalar_t *top_data = output.data_ptr<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, output_height * output_width, channels, dh, dw, bottom_data, top_data); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void CARAFEBackward_Feature( const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_masks, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ bottom_diff) { #if MAXIMIZE_KERNEL_SIZE __shared__ float shared_mask[MAX_SHARED_SCALAR_T * 2]; #else __shared__ scalar_t shared_mask[MAX_SHARED_SCALAR_T]; #endif int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int pixel_id = threadIdx.x / THREADS_PER_PIXEL; const int split_id = threadIdx.x % THREADS_PER_PIXEL; // (n, c, ph, pw) is an element in the bottom_data index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int start_w = pw - (kernel_size - 1) * scale_factor / 2; const int end_w = pw + (kernel_size - 1) * scale_factor / 2 + 1; const int start_h = ph - 
(kernel_size - 1) * scale_factor / 2; const int end_h = ph + (kernel_size - 1) * scale_factor / 2 + 1; for (int c = split_id; c < mask_channels; c += THREADS_PER_PIXEL) { const int mask_w = (c % kernel_size) * scale_factor; const int mask_h = (c / kernel_size % kernel_size) * scale_factor; const int mask_x = start_w + mask_w; const int mask_y = start_h + mask_h; if (mask_y < 0 || mask_y > height - 1 || mask_x < 0 || mask_x > width - 1) { shared_mask[c * WARP_SIZE + pixel_id] = 0; continue; } const int mask_group = c / (kernel_size * kernel_size); const int mask_c = (2 * mask_group + 1) * kernel_size * kernel_size - c - 1; int mask_index = Loc2Index(n, mask_c, mask_y, mask_x, mask_channels, height, width); shared_mask[c * WARP_SIZE + pixel_id] = bottom_masks[mask_index]; } __syncthreads(); const int channels_per_group = ceilf(channels / (float)group_size); #pragma unroll for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { int mask_group = c / channels_per_group; int top_index = Loc2Index(n, ph, pw, c, height, width, channels); scalar_t output_val = 0; #pragma unroll for (int iy = start_h; iy < end_h; iy += scale_factor) { #pragma unroll for (int ix = start_w; ix < end_w; ix += scale_factor) { if (iy < 0 || iy > height - 1 || ix < 0 || ix > width - 1) { continue; } int mask_iy = (iy - ph + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_ix = (ix - pw + (kernel_size - 1) * scale_factor / 2) / scale_factor; int mask_c = (mask_group * kernel_size + mask_iy) * kernel_size + mask_ix; int feat_index = Loc2Index(n, iy, ix, c, height, width, channels); output_val += shared_mask[mask_c * WARP_SIZE + pixel_id] * top_diff[feat_index]; } } bottom_diff[top_index] = output_val; } } template <typename scalar_t> __global__ void FeatureSum(const int num_kernels, const scalar_t *__restrict__ input_data, const int scale_factor, const int channels, const int height, const int width, scalar_t *__restrict__ output_data) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int split_id = threadIdx.x % THREADS_PER_PIXEL; index = index / THREADS_PER_PIXEL; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; for (int c = split_id; c < channels; c += THREADS_PER_PIXEL) { scalar_t output_val = 0; for (int iy = ph * scale_factor; iy < (ph + 1) * scale_factor; iy++) { for (int ix = pw * scale_factor; ix < (pw + 1) * scale_factor; ix++) { int input_id = Loc2Index(n, iy, ix, c, height * scale_factor, width * scale_factor, channels); output_val += input_data[input_id]; } } const int output_id = Loc2Index(n, ph, pw, c, height, width, channels); output_data[output_id] = output_val; } } template <typename scalar_t> __global__ void CARAFEBackward_Mask(const int num_kernels, const scalar_t *__restrict__ top_diff, const scalar_t *__restrict__ bottom_data, const int kernel_size, const int group_size, const int scale_factor, const int channels, const int down_height, const int down_width, const int height, const int width, const int mask_channels, scalar_t *__restrict__ mask_diff) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index > num_kernels - 1) { return; } const int lane_id = index % WARP_SIZE; index = index / WARP_SIZE; const int mask_c = index % mask_channels; // (n, c, ph, pw) is an element in the bottom_data index = index / mask_channels; const int pw = index % width; const int ph = (index / width) % height; const int n = index / width / height; const int down_pw = pw / scale_factor; 
const int down_ph = ph / scale_factor; const int mask_group = mask_c / (kernel_size * kernel_size); const int mask_loc = mask_c % (kernel_size * kernel_size); const int offset_x = mask_loc % kernel_size - (kernel_size - 1) / 2; const int offset_y = mask_loc / kernel_size % kernel_size - (kernel_size - 1) / 2; const int down_x = down_pw + offset_x; const int down_y = down_ph + offset_y; scalar_t output_val = 0; if (down_y >= 0 && down_y <= down_height - 1 && down_x >= 0 && down_x <= down_width - 1) { const int channels_per_mask = ceilf(channels / (float)group_size); const int start = channels_per_mask * mask_group; const int end = min(channels_per_mask * (mask_group + 1), channels); for (int c = start + lane_id; c < end; c += WARP_SIZE) { int bottom_id = Loc2Index(n, down_y, down_x, c, down_height, down_width, channels); int top_id = Loc2Index(n, ph, pw, c, height, width, channels); output_val += top_diff[top_id] * bottom_data[bottom_id]; } } __syncwarp(); output_val = warpReduceSum(output_val); if (lane_id == 0) { const int mask_id = Loc2Index(n, ph, pw, mask_c, height, width, mask_channels); mask_diff[mask_id] = output_val; } } int CARAFEBackwardLaucher(const at::Tensor top_grad, const at::Tensor rfeatures, const at::Tensor masks, const int kernel_size, const int group_size, const int scale_factor, const int batch_size, const int channels, const int input_height, const int input_width, const int output_height, const int output_width, const int mask_channels, at::Tensor rtop_grad, at::Tensor rbottom_grad_hs, at::Tensor rbottom_grad, at::Tensor rmask_grad, at::Tensor bottom_grad, at::Tensor mask_grad) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NCHW2NHWC_Top_Grad", ([&] { const scalar_t *bottom_data = top_grad.data_ptr<scalar_t>(); scalar_t *top_data = rtop_grad.data_ptr<scalar_t>(); const int dh = divideUP(channels, kTileDim); const int dw = divideUP(output_height * output_width, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, channels, output_height * output_width, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFELaucherBackward_Feature", ([&] { const int num_kernels = batch_size * output_height * output_width * THREADS_PER_PIXEL; const scalar_t *top_diff = rtop_grad.data_ptr<scalar_t>(); const scalar_t *bottom_masks = masks.data_ptr<scalar_t>(); scalar_t *bottom_diff = rbottom_grad_hs.data_ptr<scalar_t>(); CARAFEBackward_Feature<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_masks, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "FeatureSum", ([&] { const int num_kernels = batch_size * input_height * input_width * THREADS_PER_PIXEL; const scalar_t *bottom_diff_hs = rbottom_grad_hs.data_ptr<scalar_t>(); scalar_t *bottom_diff = rbottom_grad.data_ptr<scalar_t>(); FeatureSum<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, bottom_diff_hs, scale_factor, channels, input_height, input_width, bottom_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NHWC2NCHW_Bottom_Grad", ([&] { const scalar_t *bottom_data = rbottom_grad.data_ptr<scalar_t>(); scalar_t *top_data = 
bottom_grad.data_ptr<scalar_t>(); const int dh = divideUP(input_height * input_width, kTileDim); const int dw = divideUP(channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, input_height * input_width, channels, dh, dw, bottom_data, top_data); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "CARAFELaucherBackward_Mask", ([&] { const int num_kernels = batch_size * output_height * output_width * mask_channels * WARP_SIZE; const scalar_t *top_diff = rtop_grad.data_ptr<scalar_t>(); const scalar_t *bottom_data = rfeatures.data_ptr<scalar_t>(); scalar_t *mask_diff = rmask_grad.data_ptr<scalar_t>(); CARAFEBackward_Mask<scalar_t> <<<at::cuda::ATenCeilDiv(num_kernels, THREADS_PER_BLOCK), THREADS_PER_BLOCK, 0, stream>>>( num_kernels, top_diff, bottom_data, kernel_size, group_size, scale_factor, channels, input_height, input_width, output_height, output_width, mask_channels, mask_diff); })); AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "NHWC2NCHW_Mask_Grad", ([&] { const scalar_t *bottom_data = rmask_grad.data_ptr<scalar_t>(); scalar_t *top_data = mask_grad.data_ptr<scalar_t>(); const int dh = divideUP(output_height * output_width, kTileDim); const int dw = divideUP(mask_channels, kTileDim); BatchTranspose2DCUDAKernel<scalar_t> <<<batch_size * dh * dw, dim3(kTileDim, kBlockRows), 0, stream>>>( batch_size, output_height * output_width, mask_channels, dh, dw, bottom_data, top_data); })); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
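// ---------------------------------------------------------------------------
// Illustrative sketch (added, not part of the original source): the warp-level
// reduction pattern that CARAFEBackward_Mask relies on, shown in isolation.
// Each of the 32 lanes accumulates a strided partial sum, warpReduceSum()
// (defined above) folds the partials together with __shfl_down_sync, and only
// lane 0 writes the result. The kernel name and the single-warp launch below
// are hypothetical, not part of the CARAFE API.
template <typename scalar_t>
__global__ void WarpSumSketch(const scalar_t *__restrict__ in, const int n,
                              scalar_t *__restrict__ out) {
  const int lane_id = threadIdx.x % WARP_SIZE;
  scalar_t val = 0;
  for (int i = lane_id; i < n; i += WARP_SIZE) {
    val += in[i];  // strided partial sum per lane
  }
  __syncwarp();
  val = warpReduceSum(val);  // shuffle-based tree reduction within the warp
  if (lane_id == 0) {
    out[0] = val;  // lane 0 holds the full sum after the reduction
  }
}
// Intended launch (one warp): WarpSumSketch<float><<<1, WARP_SIZE>>>(in, n, out);
// ---------------------------------------------------------------------------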
ac004fd001c9bf42370467228f6ebcba6c6dfeeb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*-------------------------------------------------------------------- * * PURPOSE * * This program numerically solves the 2D incompressible Navier-Stokes * on a Square Domain [0,1]x[0,1] using pseudo-spectral methods and * Crank-Nicolson timestepping. The numerical solution is compared to * the exact Taylor-Green Vortex Solution. * * Periodic free-slip boundary conditions and Initial conditions: * u(x,y,0)=sin(2*pi*x)cos(2*pi*y) * v(x,y,0)=-cos(2*pi*x)sin(2*pi*y) * Analytical Solution (subscript denote derivatives): * u(x,y,t)=sin(2*pi*x)cos(2*pi*y)exp(-8*pi^2*t/Re) * v(x,y,t)=-cos(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * u_y(x,y,t)=-2*pi*sin(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * v_x(x,y,t)=2*pi*sin(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * omega=v_x-u_y * * AUTHORS * * B. Cloutier, B.K. Muite, P. Rigge * 4 June 2012 * * .. Scalars .. * Nx = number of modes in x - power of 2 for FFT * Ny = number of modes in y - power of 2 for FFT * nplots = number of plots produced * plotgap = number of timesteps inbetween plots * Re = Reynold's number * dt = timestep size * tol = determines when convergences is reached * i = loop counter in x direction * j = loop counter in y direction * n = loop counter for timesteps between plots * nn = loop counter for plots * chg = error at each iteration * max = maximum error * pi = value of pi * xsize = size of real arrays in x direction * ysize = size of real arrays in y direction * gridsize = size of array for complex data * start_time = start time of computation * end_time = end time of evaluation * pland2z = Forward 2d fft plan (CUFFT) * planz2d = Backward 2d fft plan (CUFFT) * nThreads = Number of threads for GPU to use * nBlocksR = number of blocks for GPU to use for real arrays * nBlocksC = number of blocks for GPU to use for complex arrays * * .. Arrays on CPU .. * * u = velocity in x direction * v = velocity in y direction * omeg = vorticity in real space * omegold = vorticity in real space at previous * iterate * omegexact = taylor-green vorticity at * at final step * x = x locations * y = y locations * * .. Arrays on GPU .. * * u_d = velocity in x direction * v_d = velocity in y direction * omeg_d = vorticity in real space * omegold = vorticity in real space at previous * iterate * omegoldhat_d = 2D Fourier transform of vorticity at previous * iterate * nlhat_d = nonlinear term in Fourier space * nloldhat_d = nonlinear term in Fourier space * at previous iterate * omegexact = taylor-green vorticity at * at final step * psihat_d = 2D Fourier transform of streamfunction * at next iteration * omegcheck_d = store of vorticity at previous iterate * temp1_d = temporary real space used for * calculations. * temp2_d = temporary complex space used for * calculations. 
* kx_d = fourier frequencies in x direction * ky_d = fourier frequencies in y direction * x_d = x locations * y_d = y locations * * REFERENCES * * ACKNOWLEDGEMENTS * * The format for the complex data types and style has followed examples on * the Nvidia website * * ACCURACY * * ERROR INDICATORS AND WARNINGS * * FURTHER COMMENTS * Check that the initial iterate is consistent with the * boundary conditions for the domain specified *-------------------------------------------------------------------- * External libraries required * Cuda FFT */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <hipfft.h> #include <cutil_inline.h> typedef double2 cmplx; typedef double real; typedef hipfftDoubleReal cufftReal_; typedef hipfftDoubleComplex cufftCmplx; #define cufftMyExecF hipfftExecD2Z #define cufftMyExecB hipfftExecZ2D #define CUFFT_TFORM_FORWARD HIPFFT_D2Z #define CUFFT_TFORM_BACKWARD HIPFFT_Z2D static __device__ __host__ inline cmplx cmul(cmplx a, cmplx b){ cmplx c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } static __device__ inline cmplx cadd (cmplx a, cmplx b) { cmplx c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } static __device__ inline cmplx csub (cmplx a, cmplx b) { cmplx c; c.x = a.x - b.x; c.y = a.y - b.y; return c; } static __device__ inline cmplx cscale(cmplx a, real b) { cmplx c; c.x = a.x * b; c.y = a.y * b; return c; } void initialdata (int Nx, int Ny, real pi, real *x, real *y, real *omeg) { int i, j; for(j=0; j<Ny; j++){ for(i=0; i<Nx; i++){ omeg[Nx*j + i] = 4.0*pi*sin(2.0*pi*x[i])*sin(2.0*pi*y[j]); } } } static __global__ void nonlin1(int Ny, real *kx_d, cmplx *omeghat_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; i = idx / (Ny/2+1); cmplx c; c.x=(real)0.0; c.y=kx_d[i]; temp1_d[idx]=cmul(omeghat_d[idx], c); } static __global__ void nonlin2(real *u_d, real *temp2_d, real *nl_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; nl_d[idx]=u_d[idx]*temp2_d[idx]; } static __global__ void nonlin3(int Ny, real *ky_d, cmplx *omeghat_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int j; cmplx c; j = idx % (Ny/2+1); c.x=(real)0.0; c.y=ky_d[j]; temp1_d[idx]=cmul(omeghat_d[idx], c); } static __global__ void nonlin4(int Nx, int Ny, real *nl_d, real *v_d, real *temp2_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; real scale= (real) 1./(real)(Nx*Ny); nl_d[idx]=(nl_d[idx]+v_d[idx]*temp2_d[idx])*scale; } static __global__ void nextstep1(int Nx, int Ny, real dt, real Re, real *kx_d, real *ky_d, cmplx *omeghat_d, cmplx *omegoldhat_d, cmplx *nloldhat_d, cmplx *nlhat_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; i = idx / (Ny/2+1); j = idx % (Ny/2+1); real coef1 = (real) 1.*(-kx_d[i]*kx_d[i] - ky_d[j]*ky_d[j]); real coef2 = (real) 1./(dt); real coef3 = (real) 1./(Re); real coef4 = (real) 1.*(coef2 + (real)0.5*coef3*coef1); cmplx term1 = cscale (omegoldhat_d[idx],coef4); cmplx term2 = cadd (nloldhat_d[idx],nlhat_d[idx]); cmplx term3 = cscale (term2, (real)0.5); real coef5 = (real) 1./(coef2 - (real)0.5*coef3*coef1); omeghat_d[idx] = cscale(csub (term1,term3), coef5); } static __global__ void nextstep2(int Nx, int Ny, real *kx_d, real *ky_d, cmplx *omeghat_d, cmplx *psihat_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; i = idx / (Ny/2+1); j = idx % (Ny/2+1); real coef6 = (real) -1./(-kx_d[i]*kx_d[i] - ky_d[j]*ky_d[j] + pow((real)0.10,14)); psihat_d[idx] = cscale(omeghat_d[idx],coef6); } static __global__ void 
nextstep3(int Nx, int Ny, cmplx *psihat_d, real *kx_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; cmplx c; i = idx / (Ny/2+1); c.x=(real)0.0; c.y=-kx_d[i]/(real)(Nx*Ny); temp1_d[idx]= cmul(c,psihat_d[idx]); } static __global__ void nextstep4(int Nx, int Ny, cmplx *psihat_d, real *ky_d, cmplx *temp1_d){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int j; cmplx c; j = idx % (Ny/2+1); c.x=(real)0.0; c.y=ky_d[j]/(real)(Nx*Ny); temp1_d[idx]=cmul(c,psihat_d[idx]); } static __global__ void copyRealArray(real *lhs, real *rhs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; lhs[idx]=rhs[idx]; } static __global__ void copyCmplxArray(cmplx *lhs, cmplx *rhs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; lhs[idx].x=rhs[idx].x; lhs[idx].y=rhs[idx].y; } static __global__ void checkConvergence1(int Nx, int Ny, real *omeg_d, real *omegcheck_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; real scale=(real)1.0/(real)(Nx*Ny); omegcheck_d[idx] = ((omeg_d[idx]-omegcheck_d[idx])*scale); omegcheck_d[idx] = omegcheck_d[idx]*omegcheck_d[idx]; } void savedata (int Nx, int Ny, int nplot, real *omeg) { FILE *f = NULL; char nameconfig1[128]; nameconfig1[0]='\0'; sprintf (nameconfig1, "data/omeg%.10d.datbin", nplot); f = fopen (nameconfig1, "wb"); real *omegscale; omegscale = (real*)malloc (Nx * Ny * sizeof(real)); int i; for (i=0; i<Nx*Ny; i++) { omegscale[i] = omeg[i]*1./(real)(Nx*Ny); } fwrite (omegscale, sizeof(real), Nx*Ny, f); fclose (f); } int main(){ // declare variables hipfftHandle planz2d, pland2z; real chg, max; real pi; int Nx=1024; int Ny=1024; int nplots=1; int plotgap=20; real Re = 1.e0; real tol = 10.e-10; real dt = 0.000125; int nThreads; int nBlocksR; int nBlocksC; real *kx,*ky; real *x, *y, *omeg, *omegexact; int i, j, n, nn; struct timeval start_time, end_time; // declare variables for GPU real *u_d, *v_d; real *omegcheck_d, *omeg_d, *nl_d, *temp2_d; cmplx *omegoldhat_d, *nloldhat_d, *omeghat_d, *nlhat_d, *psihat_d, *temp1_d; real *kx_d, *ky_d; real *x_d, *y_d; size_t xsize = Nx * sizeof(real); size_t ysize = Ny * sizeof(real); size_t gridsize = Nx * (Ny/2+1)*sizeof(cmplx); pi = 3.14159265358979323846264338327950288419716939937510; nThreads= (min(512, Nx)); nBlocksR= (Nx * Ny / nThreads); nBlocksC= (Nx * (Ny/2+1) / nThreads); printf("Program starting\n"); printf("Grid: %d X %d\n",Nx,Ny); printf("dt: %lf\n",dt); kx=(real*) malloc(xsize); ky=(real*) malloc(ysize); x=(real*) malloc(xsize); y=(real*) malloc(ysize); omeg=(real*) malloc(Nx * Ny * sizeof(real)); omegexact=(real*) malloc(Nx * Ny * sizeof(real)); printf("Allocated CPU arrays\n"); cutilSafeCall(hipMalloc((void**)&u_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&v_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&omegcheck_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&omeg_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&nl_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&temp2_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&omegoldhat_d, gridsize)); cutilSafeCall(hipMalloc((void**)&nloldhat_d, gridsize)); cutilSafeCall(hipMalloc((void**)&omeghat_d, gridsize)); cutilSafeCall(hipMalloc((void**)&nlhat_d, gridsize)); cutilSafeCall(hipMalloc((void**)&psihat_d, gridsize)); cutilSafeCall(hipMalloc((void**)&temp1_d, gridsize)); cutilSafeCall(hipMalloc((void**)&kx_d, Nx * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&ky_d, Ny * sizeof(cufftReal_))); 
cutilSafeCall(hipMalloc((void**)&x_d, Nx * sizeof(cufftReal_))); cutilSafeCall(hipMalloc((void**)&y_d, Ny * sizeof(cufftReal_))); printf("Allocated GPU arrays\n"); cufftSafeCall(hipfftPlan2d(&pland2z, Nx, Ny, CUFFT_TFORM_FORWARD)); cufftSafeCall(hipfftPlan2d(&planz2d, Nx, Ny, CUFFT_TFORM_BACKWARD)); printf("Setup FFTs\n"); // setup fourier frequencies for(i=0; i<Nx/2; i++) kx[i]=2.0*pi*(real)i; kx[Nx/2]=0; for(i=0; i<Nx/2-1; i++) kx[Nx/2+1+i] = -kx[Nx/2-1-i]; for(i=0; i<Nx; i++) x[i]=(real)i/(real)Nx; for(j=0; j<Ny/2; j++) ky[j]=2.0*pi*(real)j; ky[Ny/2]=0.0; for(j=0; j<Ny/2-1; j++) ky[Ny/2+1+j]=-ky[Ny/2-1-j]; for(j=0; j<Ny; j++) y[j]=(real)j/(real)Ny; cutilSafeCall(hipMemcpy(kx_d,kx,Nx*sizeof(real),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(x_d,x,Nx*sizeof(real),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(ky_d,ky,Ny*sizeof(real),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(y_d,y,Ny*sizeof(real),hipMemcpyHostToDevice)); printf("Setup grid and fourier frequencies\n"); //!!!!!!!!!!!!!! //!initial data! //!!!!!!!!!!!!!! initialdata(Nx,Ny,pi,x,y,omeg); cutilSafeCall(hipMemcpy(omeg_d,omeg,Ny*Nx*sizeof(real),hipMemcpyHostToDevice)); printf("Copied initial data to device\n"); hipLaunchKernelGGL(( copyRealArray) , dim3(nBlocksR), dim3(nThreads) , 0, 0, omegcheck_d, omeg_d); cutilCheckMsg("Kernel execution failed: [ copyRealArray ]"); cufftSafeCall(cufftMyExecF(pland2z,(cufftReal_ *)omeg_d,(cufftCmplx *)omeghat_d)); hipLaunchKernelGGL(( nextstep2) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx, Ny, kx_d, ky_d, omeghat_d, psihat_d); cutilCheckMsg("Kernel execution failed: [ nextstep2 ]"); hipLaunchKernelGGL(( nextstep3) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)v_d)); hipLaunchKernelGGL(( nextstep4) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep4 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)u_d)); //!!!!!!!!!!!!!!!!!!!!!!!! //!initial nonlinear term! //!!!!!!!!!!!!!!!!!!!!!!!!' hipLaunchKernelGGL(( nonlin1) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,kx_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin1 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)temp2_d)); hipLaunchKernelGGL(( nonlin2) , dim3(nBlocksR), dim3(nThreads) , 0, 0, u_d,temp2_d,nl_d); cutilCheckMsg("Kernel execution failed: [ nonlin2 ]"); hipLaunchKernelGGL(( nonlin3) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Ny,ky_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)temp2_d)); hipLaunchKernelGGL(( nonlin4) , dim3(nBlocksR), dim3(nThreads) , 0, 0, Nx,Ny,nl_d,v_d,temp2_d); cutilCheckMsg("Kernel execution failed: [ nonlin4 ]"); cutilSafeCall(hipMemcpy(omeg, nl_d, Nx * Ny * sizeof(real), hipMemcpyDeviceToHost)); cufftSafeCall(cufftMyExecF(pland2z,(cufftReal_ *)nl_d,(cufftCmplx *)nlhat_d)); //!!!!!!!!!!!!!!!!!!!!! 
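/* Added note (not part of the original source): the four nonlin kernels above
 * evaluate the advection term N = u * d(omega)/dx + v * d(omega)/dy
 * pseudo-spectrally: nonlin1 multiplies omegahat by i*kx and the inverse FFT
 * gives d(omega)/dx, nonlin2 multiplies that by u in real space, nonlin3 and
 * nonlin4 repeat the pattern with i*ky and v, and nonlin4 also applies the
 * 1/(Nx*Ny) FFT normalisation before the result is transformed forward into
 * nlhat_d. */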
printf("Got initial data, starting timestepping\n"); gettimeofday(&start_time, NULL); for(nn=1; nn<=nplots; nn++){ for(n=1; n<=plotgap; n++){ chg=1.0; hipLaunchKernelGGL(( copyCmplxArray) , dim3(nBlocksC), dim3(nThreads) , 0, 0, nloldhat_d,nlhat_d); cutilCheckMsg("Kernel execution failed: [ copyCmplxArray ]"); hipLaunchKernelGGL(( copyCmplxArray) , dim3(nBlocksC), dim3(nThreads) , 0, 0, omegoldhat_d,omeghat_d); cutilCheckMsg("Kernel execution failed: [ copyCmplxArray ]"); while(chg>tol){ //!!!!!!!!!!!!!!!!!!!!!! //!{n,k} nonlinear term! //!!!!!!!!!!!!!!!!!!!!!! hipLaunchKernelGGL(( nonlin1) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Ny,kx_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin1 ]"); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)temp1_d, (cufftReal_ *)temp2_d)); hipLaunchKernelGGL(( nonlin2) , dim3(nBlocksR), dim3(nThreads) , 0, 0, u_d, temp2_d, nl_d); cutilCheckMsg("Kernel execution failed: [ nonlin2 ]"); hipLaunchKernelGGL(( nonlin3) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Ny, ky_d, omeghat_d, temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin3 ]"); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)temp1_d, (cufftReal_ *)temp2_d)); hipLaunchKernelGGL(( nonlin4) , dim3(nBlocksR), dim3(nThreads) , 0, 0, Nx, Ny, nl_d, v_d, temp2_d); cutilCheckMsg("Kernel execution failed: [ nonlin4 ]"); cutilSafeCall(hipMemcpy(omeg, nl_d, Nx * Ny * sizeof(real), hipMemcpyDeviceToHost)); cufftSafeCall(cufftMyExecF(pland2z, (cufftReal_ *)nl_d, (cufftCmplx *)nlhat_d)); //!!!!!!!!!!!!!!!!!!!!! hipLaunchKernelGGL(( nextstep1) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,Ny,dt,Re,kx_d,ky_d,omeghat_d,omegoldhat_d,nloldhat_d,nlhat_d); cutilCheckMsg("Kernel execution failed: [ nextstep1 ]"); // Calculate streamfunction in fourier space, psihat hipLaunchKernelGGL(( nextstep2) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx, Ny, kx_d, ky_d, omeghat_d, psihat_d); cutilCheckMsg("Kernel execution failed: [ nextstep2 ]"); // Calculate y velocity hipLaunchKernelGGL(( nextstep3) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)v_d)); // Calculate x velocity hipLaunchKernelGGL(( nextstep4) , dim3(nBlocksC), dim3(nThreads) , 0, 0, Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep4 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)u_d)); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)omeghat_d, (cufftReal_ *)omeg_d)); hipLaunchKernelGGL(( checkConvergence1) , dim3(nBlocksR), dim3(nThreads) , 0, 0, Nx, Ny, omeg_d, omegcheck_d); cutilCheckMsg("Kernel execution failed: [ checkConvergence1 ]"); cufftSafeCall(cufftMyExecF(pland2z, (cufftReal_ *)omegcheck_d, (cufftCmplx *)temp1_d)); cutilSafeCall(hipMemcpy(omeg, temp1_d, sizeof(real), hipMemcpyDeviceToHost)); chg=omeg[0]; hipLaunchKernelGGL(( copyRealArray) , dim3(nBlocksR), dim3(nThreads) , 0, 0, omegcheck_d, omeg_d); cutilCheckMsg("Kernel execution failed: [ copyRealArray ]"); } } } gettimeofday(&end_time, NULL); start_time.tv_sec = end_time.tv_sec - start_time.tv_sec; start_time.tv_usec = end_time.tv_usec - start_time.tv_usec; printf ("Timstepping took %lf seconds...\n", (real)(start_time.tv_sec) + (real)(start_time.tv_usec) / (real)1000000.); cutilSafeCall(hipMemcpy(omeg, omeg_d, Nx * Ny * sizeof(real), hipMemcpyDeviceToHost)); // get exact omega for(j=0; j<Ny; j++) { for(i=0;i<Nx;i++){ 
omegexact[j*Ny+i]=4.0*pi*sin(2.0*pi*x[i])*sin(2.0*pi*y[j])*exp(-8.0*pi*pi*(real)nplots*(real)plotgap*dt/Re); } } max=0; for(i=0; i<Nx*Ny;i++) { omeg[i]=omeg[i]/(real)(Nx*Ny); chg=abs(omeg[i]-omegexact[i]); if(chg>=max) max=chg; } printf("Maximum error %lf ...\n", max); // turn of saving data for benchmarking //savedata (Nx,Ny,0,omeg); printf("Saved to disk\n"); cutilSafeCall (hipFree ((void*)kx_d)); cutilSafeCall (hipFree ((void*)ky_d)); cutilSafeCall (hipFree ((void*)x_d)); cutilSafeCall (hipFree ((void*)y_d)); cutilSafeCall (hipFree ((void*)u_d)); cutilSafeCall (hipFree ((void*)v_d)); cutilSafeCall (hipFree ((void*)temp1_d)); cutilSafeCall (hipFree ((void*)temp2_d)); cutilSafeCall (hipFree ((void*)omeg_d)); cutilSafeCall (hipFree ((void*)nl_d)); cutilSafeCall (hipFree ((void*)omegcheck_d)); cutilSafeCall (hipFree ((void*)omegoldhat_d)); cutilSafeCall (hipFree ((void*)nloldhat_d)); cutilSafeCall (hipFree ((void*)nlhat_d)); cutilSafeCall (hipFree ((void*)psihat_d)); printf("Deallocated GPU arrays \n"); cufftSafeCall (hipfftDestroy (pland2z)); cufftSafeCall (hipfftDestroy (planz2d)); printf("Destroyed CUFFT plans \n"); free (kx); free (ky); free (x); free (y); free (omeg); free (omegexact); printf ("Deallocated CPU arrays\n"); printf("End Program \n"); return 0; }
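/* ---------------------------------------------------------------------------
 * Added note (not part of the original source): the inner while loop realises
 * Crank-Nicolson timestepping with a fixed-point iteration on the nonlinear
 * term. Writing k^2 = kx^2 + ky^2 and Nhat for the transformed advection term,
 * nextstep1 computes
 *
 *   omegahat^{n+1} = [ (1/dt - k^2/(2*Re)) * omegahat^{n}
 *                      - (Nhat^{n} + Nhat^{n+1,k}) / 2 ] / (1/dt + k^2/(2*Re)),
 *
 * nextstep2 inverts the Poisson equation for the streamfunction,
 * psihat ~= omegahat / k^2 (a 1e-14 offset in the denominator avoids dividing
 * by zero at k = 0), and nextstep3/nextstep4 recover v = -psi_x and u = psi_y
 * by multiplying psihat with -i*kx and i*ky, including the 1/(Nx*Ny) FFT
 * normalisation. (The launch sites pass kx_d for nextstep4's ky argument,
 * which is equivalent here only because the grid is square with Nx == Ny.)
 * ------------------------------------------------------------------------- */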
ac004fd001c9bf42370467228f6ebcba6c6dfeeb.cu
/*-------------------------------------------------------------------- * * PURPOSE * * This program numerically solves the 2D incompressible Navier-Stokes * on a Square Domain [0,1]x[0,1] using pseudo-spectral methods and * Crank-Nicolson timestepping. The numerical solution is compared to * the exact Taylor-Green Vortex Solution. * * Periodic free-slip boundary conditions and Initial conditions: * u(x,y,0)=sin(2*pi*x)cos(2*pi*y) * v(x,y,0)=-cos(2*pi*x)sin(2*pi*y) * Analytical Solution (subscript denote derivatives): * u(x,y,t)=sin(2*pi*x)cos(2*pi*y)exp(-8*pi^2*t/Re) * v(x,y,t)=-cos(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * u_y(x,y,t)=-2*pi*sin(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * v_x(x,y,t)=2*pi*sin(2*pi*x)sin(2*pi*y)exp(-8*pi^2*t/Re) * omega=v_x-u_y * * AUTHORS * * B. Cloutier, B.K. Muite, P. Rigge * 4 June 2012 * * .. Scalars .. * Nx = number of modes in x - power of 2 for FFT * Ny = number of modes in y - power of 2 for FFT * nplots = number of plots produced * plotgap = number of timesteps inbetween plots * Re = Reynold's number * dt = timestep size * tol = determines when convergences is reached * i = loop counter in x direction * j = loop counter in y direction * n = loop counter for timesteps between plots * nn = loop counter for plots * chg = error at each iteration * max = maximum error * pi = value of pi * xsize = size of real arrays in x direction * ysize = size of real arrays in y direction * gridsize = size of array for complex data * start_time = start time of computation * end_time = end time of evaluation * pland2z = Forward 2d fft plan (CUFFT) * planz2d = Backward 2d fft plan (CUFFT) * nThreads = Number of threads for GPU to use * nBlocksR = number of blocks for GPU to use for real arrays * nBlocksC = number of blocks for GPU to use for complex arrays * * .. Arrays on CPU .. * * u = velocity in x direction * v = velocity in y direction * omeg = vorticity in real space * omegold = vorticity in real space at previous * iterate * omegexact = taylor-green vorticity at * at final step * x = x locations * y = y locations * * .. Arrays on GPU .. * * u_d = velocity in x direction * v_d = velocity in y direction * omeg_d = vorticity in real space * omegold = vorticity in real space at previous * iterate * omegoldhat_d = 2D Fourier transform of vorticity at previous * iterate * nlhat_d = nonlinear term in Fourier space * nloldhat_d = nonlinear term in Fourier space * at previous iterate * omegexact = taylor-green vorticity at * at final step * psihat_d = 2D Fourier transform of streamfunction * at next iteration * omegcheck_d = store of vorticity at previous iterate * temp1_d = temporary real space used for * calculations. * temp2_d = temporary complex space used for * calculations. 
* kx_d = fourier frequencies in x direction * ky_d = fourier frequencies in y direction * x_d = x locations * y_d = y locations * * REFERENCES * * ACKNOWLEDGEMENTS * * The format for the complex data types and style has followed examples on * the Nvidia website * * ACCURACY * * ERROR INDICATORS AND WARNINGS * * FURTHER COMMENTS * Check that the initial iterate is consistent with the * boundary conditions for the domain specified *-------------------------------------------------------------------- * External libraries required * Cuda FFT */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <cufft.h> #include <cutil_inline.h> typedef double2 cmplx; typedef double real; typedef cufftDoubleReal cufftReal_; typedef cufftDoubleComplex cufftCmplx; #define cufftMyExecF cufftExecD2Z #define cufftMyExecB cufftExecZ2D #define CUFFT_TFORM_FORWARD CUFFT_D2Z #define CUFFT_TFORM_BACKWARD CUFFT_Z2D static __device__ __host__ inline cmplx cmul(cmplx a, cmplx b){ cmplx c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } static __device__ inline cmplx cadd (cmplx a, cmplx b) { cmplx c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } static __device__ inline cmplx csub (cmplx a, cmplx b) { cmplx c; c.x = a.x - b.x; c.y = a.y - b.y; return c; } static __device__ inline cmplx cscale(cmplx a, real b) { cmplx c; c.x = a.x * b; c.y = a.y * b; return c; } void initialdata (int Nx, int Ny, real pi, real *x, real *y, real *omeg) { int i, j; for(j=0; j<Ny; j++){ for(i=0; i<Nx; i++){ omeg[Nx*j + i] = 4.0*pi*sin(2.0*pi*x[i])*sin(2.0*pi*y[j]); } } } static __global__ void nonlin1(int Ny, real *kx_d, cmplx *omeghat_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; i = idx / (Ny/2+1); cmplx c; c.x=(real)0.0; c.y=kx_d[i]; temp1_d[idx]=cmul(omeghat_d[idx], c); } static __global__ void nonlin2(real *u_d, real *temp2_d, real *nl_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; nl_d[idx]=u_d[idx]*temp2_d[idx]; } static __global__ void nonlin3(int Ny, real *ky_d, cmplx *omeghat_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int j; cmplx c; j = idx % (Ny/2+1); c.x=(real)0.0; c.y=ky_d[j]; temp1_d[idx]=cmul(omeghat_d[idx], c); } static __global__ void nonlin4(int Nx, int Ny, real *nl_d, real *v_d, real *temp2_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; real scale= (real) 1./(real)(Nx*Ny); nl_d[idx]=(nl_d[idx]+v_d[idx]*temp2_d[idx])*scale; } static __global__ void nextstep1(int Nx, int Ny, real dt, real Re, real *kx_d, real *ky_d, cmplx *omeghat_d, cmplx *omegoldhat_d, cmplx *nloldhat_d, cmplx *nlhat_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; i = idx / (Ny/2+1); j = idx % (Ny/2+1); real coef1 = (real) 1.*(-kx_d[i]*kx_d[i] - ky_d[j]*ky_d[j]); real coef2 = (real) 1./(dt); real coef3 = (real) 1./(Re); real coef4 = (real) 1.*(coef2 + (real)0.5*coef3*coef1); cmplx term1 = cscale (omegoldhat_d[idx],coef4); cmplx term2 = cadd (nloldhat_d[idx],nlhat_d[idx]); cmplx term3 = cscale (term2, (real)0.5); real coef5 = (real) 1./(coef2 - (real)0.5*coef3*coef1); omeghat_d[idx] = cscale(csub (term1,term3), coef5); } static __global__ void nextstep2(int Nx, int Ny, real *kx_d, real *ky_d, cmplx *omeghat_d, cmplx *psihat_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i,j; i = idx / (Ny/2+1); j = idx % (Ny/2+1); real coef6 = (real) -1./(-kx_d[i]*kx_d[i] - ky_d[j]*ky_d[j] + pow((real)0.10,14)); psihat_d[idx] = cscale(omeghat_d[idx],coef6); } static __global__ void nextstep3(int Nx, 
int Ny, cmplx *psihat_d, real *kx_d, cmplx *temp1_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int i; cmplx c; i = idx / (Ny/2+1); c.x=(real)0.0; c.y=-kx_d[i]/(real)(Nx*Ny); temp1_d[idx]= cmul(c,psihat_d[idx]); } static __global__ void nextstep4(int Nx, int Ny, cmplx *psihat_d, real *ky_d, cmplx *temp1_d){ int idx = blockIdx.x * blockDim.x + threadIdx.x; int j; cmplx c; j = idx % (Ny/2+1); c.x=(real)0.0; c.y=ky_d[j]/(real)(Nx*Ny); temp1_d[idx]=cmul(c,psihat_d[idx]); } static __global__ void copyRealArray(real *lhs, real *rhs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; lhs[idx]=rhs[idx]; } static __global__ void copyCmplxArray(cmplx *lhs, cmplx *rhs) { int idx = blockIdx.x * blockDim.x + threadIdx.x; lhs[idx].x=rhs[idx].x; lhs[idx].y=rhs[idx].y; } static __global__ void checkConvergence1(int Nx, int Ny, real *omeg_d, real *omegcheck_d) { int idx = blockIdx.x * blockDim.x + threadIdx.x; real scale=(real)1.0/(real)(Nx*Ny); omegcheck_d[idx] = ((omeg_d[idx]-omegcheck_d[idx])*scale); omegcheck_d[idx] = omegcheck_d[idx]*omegcheck_d[idx]; } void savedata (int Nx, int Ny, int nplot, real *omeg) { FILE *f = NULL; char nameconfig1[128]; nameconfig1[0]='\0'; sprintf (nameconfig1, "data/omeg%.10d.datbin", nplot); f = fopen (nameconfig1, "wb"); real *omegscale; omegscale = (real*)malloc (Nx * Ny * sizeof(real)); int i; for (i=0; i<Nx*Ny; i++) { omegscale[i] = omeg[i]*1./(real)(Nx*Ny); } fwrite (omegscale, sizeof(real), Nx*Ny, f); fclose (f); } int main(){ // declare variables cufftHandle planz2d, pland2z; real chg, max; real pi; int Nx=1024; int Ny=1024; int nplots=1; int plotgap=20; real Re = 1.e0; real tol = 10.e-10; real dt = 0.000125; int nThreads; int nBlocksR; int nBlocksC; real *kx,*ky; real *x, *y, *omeg, *omegexact; int i, j, n, nn; struct timeval start_time, end_time; // declare variables for GPU real *u_d, *v_d; real *omegcheck_d, *omeg_d, *nl_d, *temp2_d; cmplx *omegoldhat_d, *nloldhat_d, *omeghat_d, *nlhat_d, *psihat_d, *temp1_d; real *kx_d, *ky_d; real *x_d, *y_d; size_t xsize = Nx * sizeof(real); size_t ysize = Ny * sizeof(real); size_t gridsize = Nx * (Ny/2+1)*sizeof(cmplx); pi = 3.14159265358979323846264338327950288419716939937510; nThreads= (min(512, Nx)); nBlocksR= (Nx * Ny / nThreads); nBlocksC= (Nx * (Ny/2+1) / nThreads); printf("Program starting\n"); printf("Grid: %d X %d\n",Nx,Ny); printf("dt: %lf\n",dt); kx=(real*) malloc(xsize); ky=(real*) malloc(ysize); x=(real*) malloc(xsize); y=(real*) malloc(ysize); omeg=(real*) malloc(Nx * Ny * sizeof(real)); omegexact=(real*) malloc(Nx * Ny * sizeof(real)); printf("Allocated CPU arrays\n"); cutilSafeCall(cudaMalloc((void**)&u_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&v_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&omegcheck_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&omeg_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&nl_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&temp2_d, Nx * Ny * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&omegoldhat_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&nloldhat_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&omeghat_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&nlhat_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&psihat_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&temp1_d, gridsize)); cutilSafeCall(cudaMalloc((void**)&kx_d, Nx * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&ky_d, Ny * sizeof(cufftReal_))); 
cutilSafeCall(cudaMalloc((void**)&x_d, Nx * sizeof(cufftReal_))); cutilSafeCall(cudaMalloc((void**)&y_d, Ny * sizeof(cufftReal_))); printf("Allocated GPU arrays\n"); cufftSafeCall(cufftPlan2d(&pland2z, Nx, Ny, CUFFT_TFORM_FORWARD)); cufftSafeCall(cufftPlan2d(&planz2d, Nx, Ny, CUFFT_TFORM_BACKWARD)); printf("Setup FFTs\n"); // setup fourier frequencies for(i=0; i<Nx/2; i++) kx[i]=2.0*pi*(real)i; kx[Nx/2]=0; for(i=0; i<Nx/2-1; i++) kx[Nx/2+1+i] = -kx[Nx/2-1-i]; for(i=0; i<Nx; i++) x[i]=(real)i/(real)Nx; for(j=0; j<Ny/2; j++) ky[j]=2.0*pi*(real)j; ky[Ny/2]=0.0; for(j=0; j<Ny/2-1; j++) ky[Ny/2+1+j]=-ky[Ny/2-1-j]; for(j=0; j<Ny; j++) y[j]=(real)j/(real)Ny; cutilSafeCall(cudaMemcpy(kx_d,kx,Nx*sizeof(real),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(x_d,x,Nx*sizeof(real),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(ky_d,ky,Ny*sizeof(real),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(y_d,y,Ny*sizeof(real),cudaMemcpyHostToDevice)); printf("Setup grid and fourier frequencies\n"); //!!!!!!!!!!!!!! //!initial data! //!!!!!!!!!!!!!! initialdata(Nx,Ny,pi,x,y,omeg); cutilSafeCall(cudaMemcpy(omeg_d,omeg,Ny*Nx*sizeof(real),cudaMemcpyHostToDevice)); printf("Copied initial data to device\n"); copyRealArray <<< nBlocksR, nThreads >>>(omegcheck_d, omeg_d); cutilCheckMsg("Kernel execution failed: [ copyRealArray ]"); cufftSafeCall(cufftMyExecF(pland2z,(cufftReal_ *)omeg_d,(cufftCmplx *)omeghat_d)); nextstep2 <<< nBlocksC, nThreads >>> (Nx, Ny, kx_d, ky_d, omeghat_d, psihat_d); cutilCheckMsg("Kernel execution failed: [ nextstep2 ]"); nextstep3 <<< nBlocksC, nThreads >>>(Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)v_d)); nextstep4 <<< nBlocksC, nThreads >>>(Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep4 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)u_d)); //!!!!!!!!!!!!!!!!!!!!!!!! //!initial nonlinear term! //!!!!!!!!!!!!!!!!!!!!!!!!' nonlin1 <<< nBlocksC, nThreads >>>(Nx,kx_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin1 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)temp2_d)); nonlin2 <<< nBlocksR, nThreads >>>(u_d,temp2_d,nl_d); cutilCheckMsg("Kernel execution failed: [ nonlin2 ]"); nonlin3 <<< nBlocksC, nThreads >>>(Ny,ky_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)temp2_d)); nonlin4 <<< nBlocksR, nThreads >>>(Nx,Ny,nl_d,v_d,temp2_d); cutilCheckMsg("Kernel execution failed: [ nonlin4 ]"); cutilSafeCall(cudaMemcpy(omeg, nl_d, Nx * Ny * sizeof(real), cudaMemcpyDeviceToHost)); cufftSafeCall(cufftMyExecF(pland2z,(cufftReal_ *)nl_d,(cufftCmplx *)nlhat_d)); //!!!!!!!!!!!!!!!!!!!!! printf("Got initial data, starting timestepping\n"); gettimeofday(&start_time, NULL); for(nn=1; nn<=nplots; nn++){ for(n=1; n<=plotgap; n++){ chg=1.0; copyCmplxArray <<< nBlocksC, nThreads >>>(nloldhat_d,nlhat_d); cutilCheckMsg("Kernel execution failed: [ copyCmplxArray ]"); copyCmplxArray <<< nBlocksC, nThreads >>>(omegoldhat_d,omeghat_d); cutilCheckMsg("Kernel execution failed: [ copyCmplxArray ]"); while(chg>tol){ //!!!!!!!!!!!!!!!!!!!!!! //!{n,k} nonlinear term! //!!!!!!!!!!!!!!!!!!!!!! 
nonlin1 <<< nBlocksC, nThreads >>>(Ny,kx_d,omeghat_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin1 ]"); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)temp1_d, (cufftReal_ *)temp2_d)); nonlin2 <<< nBlocksR, nThreads >>>(u_d, temp2_d, nl_d); cutilCheckMsg("Kernel execution failed: [ nonlin2 ]"); nonlin3 <<< nBlocksC, nThreads >>>(Ny, ky_d, omeghat_d, temp1_d); cutilCheckMsg("Kernel execution failed: [ nonlin3 ]"); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)temp1_d, (cufftReal_ *)temp2_d)); nonlin4 <<< nBlocksR, nThreads >>>(Nx, Ny, nl_d, v_d, temp2_d); cutilCheckMsg("Kernel execution failed: [ nonlin4 ]"); cutilSafeCall(cudaMemcpy(omeg, nl_d, Nx * Ny * sizeof(real), cudaMemcpyDeviceToHost)); cufftSafeCall(cufftMyExecF(pland2z, (cufftReal_ *)nl_d, (cufftCmplx *)nlhat_d)); //!!!!!!!!!!!!!!!!!!!!! nextstep1 <<< nBlocksC, nThreads >>>(Nx,Ny,dt,Re,kx_d,ky_d,omeghat_d,omegoldhat_d,nloldhat_d,nlhat_d); cutilCheckMsg("Kernel execution failed: [ nextstep1 ]"); // Calculate streamfunction in fourier space, psihat nextstep2 <<< nBlocksC, nThreads >>> (Nx, Ny, kx_d, ky_d, omeghat_d, psihat_d); cutilCheckMsg("Kernel execution failed: [ nextstep2 ]"); // Calculate y velocity nextstep3 <<< nBlocksC, nThreads >>>(Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep3 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)v_d)); // Calculate x velocity nextstep4 <<< nBlocksC, nThreads >>>(Nx,Ny,psihat_d,kx_d,temp1_d); cutilCheckMsg("Kernel execution failed: [ nextstep4 ]"); cufftSafeCall(cufftMyExecB(planz2d,(cufftCmplx *)temp1_d,(cufftReal_ *)u_d)); cufftSafeCall(cufftMyExecB(planz2d, (cufftCmplx *)omeghat_d, (cufftReal_ *)omeg_d)); checkConvergence1 <<< nBlocksR, nThreads >>>(Nx, Ny, omeg_d, omegcheck_d); cutilCheckMsg("Kernel execution failed: [ checkConvergence1 ]"); cufftSafeCall(cufftMyExecF(pland2z, (cufftReal_ *)omegcheck_d, (cufftCmplx *)temp1_d)); cutilSafeCall(cudaMemcpy(omeg, temp1_d, sizeof(real), cudaMemcpyDeviceToHost)); chg=omeg[0]; copyRealArray <<< nBlocksR, nThreads >>>(omegcheck_d, omeg_d); cutilCheckMsg("Kernel execution failed: [ copyRealArray ]"); } } } gettimeofday(&end_time, NULL); start_time.tv_sec = end_time.tv_sec - start_time.tv_sec; start_time.tv_usec = end_time.tv_usec - start_time.tv_usec; printf ("Timstepping took %lf seconds...\n", (real)(start_time.tv_sec) + (real)(start_time.tv_usec) / (real)1000000.); cutilSafeCall(cudaMemcpy(omeg, omeg_d, Nx * Ny * sizeof(real), cudaMemcpyDeviceToHost)); // get exact omega for(j=0; j<Ny; j++) { for(i=0;i<Nx;i++){ omegexact[j*Ny+i]=4.0*pi*sin(2.0*pi*x[i])*sin(2.0*pi*y[j])*exp(-8.0*pi*pi*(real)nplots*(real)plotgap*dt/Re); } } max=0; for(i=0; i<Nx*Ny;i++) { omeg[i]=omeg[i]/(real)(Nx*Ny); chg=abs(omeg[i]-omegexact[i]); if(chg>=max) max=chg; } printf("Maximum error %lf ...\n", max); // turn of saving data for benchmarking //savedata (Nx,Ny,0,omeg); printf("Saved to disk\n"); cutilSafeCall (cudaFree ((void*)kx_d)); cutilSafeCall (cudaFree ((void*)ky_d)); cutilSafeCall (cudaFree ((void*)x_d)); cutilSafeCall (cudaFree ((void*)y_d)); cutilSafeCall (cudaFree ((void*)u_d)); cutilSafeCall (cudaFree ((void*)v_d)); cutilSafeCall (cudaFree ((void*)temp1_d)); cutilSafeCall (cudaFree ((void*)temp2_d)); cutilSafeCall (cudaFree ((void*)omeg_d)); cutilSafeCall (cudaFree ((void*)nl_d)); cutilSafeCall (cudaFree ((void*)omegcheck_d)); cutilSafeCall (cudaFree ((void*)omegoldhat_d)); cutilSafeCall (cudaFree ((void*)nloldhat_d)); cutilSafeCall (cudaFree ((void*)nlhat_d)); cutilSafeCall 
(cudaFree ((void*)psihat_d)); printf("Deallocated GPU arrays \n"); cufftSafeCall (cufftDestroy (pland2z)); cufftSafeCall (cufftDestroy (planz2d)); printf("Destroyed CUFFT plans \n"); free (kx); free (ky); free (x); free (y); free (omeg); free (omegexact); printf ("Deallocated CPU arrays\n"); printf("End Program \n"); return 0; }
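/* Added note (not part of the original source): the convergence test in the
 * inner while loop avoids a separate reduction kernel. checkConvergence1
 * stores ((omega_new - omega_old) / (Nx*Ny))^2 pointwise in omegcheck_d, a
 * forward FFT is applied to that array, and only the zeroth (DC) coefficient
 * is copied back to the host; for the unnormalised forward transform this
 * coefficient equals the sum of the squared differences over the whole grid,
 * which is then compared against tol. */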
a5c69a834bf8c11cdb7dbc84e0be797fca36ea4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> int ceil_div(int a, int b){ return (a + b - 1) / b; } // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio 
> 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(ceil_div((int)output_size, 512), 4096)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { hipLaunchKernelGGL(( RoIAlignForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(ceil_div((int)grad.numel(), 512), 4096)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] { hipLaunchKernelGGL(( 
RoIAlignBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
a5c69a834bf8c11cdb7dbc84e0be797fca36ea4c.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> int ceil_div(int a, int b){ return (a + b - 1) / b; } // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. 
= 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_start_w = offset_bottom_rois[1] * spatial_scale; T roi_start_h = offset_bottom_rois[2] * spatial_scale; T roi_end_w = offset_bottom_rois[3] * spatial_scale; T roi_end_h = offset_bottom_rois[4] * spatial_scale; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio 
> 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignBackward at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(ceil_div((int)output_size, 512), 4096)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlign_forward", [&] { RoIAlignForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(ceil_div((int)grad.numel(), 512), 4096)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlign_backward", [&] { RoIAlignBackwardFeature<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), 
grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
d564dd8cc101c7ddc76c1891a3f7b8edcf578546.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* * This sample demonstrates how use texture fetches in CUDA * * This sample takes an input PGM image (image_filename) and generates * an output PGM image (image_filename_out). This CUDA kernel performs * a simple 2D transform (rotation) on the texture coordinates (u,v). */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <simpleTexture_kernel.cu> char *image_filename = "lena_bw.pgm"; char *ref_filename = "ref_rotated.pgm"; float angle = 0.5f; // angle to rotate image by (in radians) #define MIN_EPSILON_ERROR 5e-3f //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { cutilDeviceInit(argc, argv); } else { hipSetDevice( cutGetMaxGflopsDeviceId() ); } // load image from disk float* h_data = NULL; unsigned int width, height; char* image_path = cutFindFilePath(image_filename, argv[0]); if (image_path == 0) { printf("Unable to source file file %s\n", image_filename); exit(EXIT_FAILURE); } cutilCheckError( cutLoadPGMf(image_path, &h_data, &width, &height)); unsigned int size = width * height * sizeof(float); printf("Loaded '%s', %d x %d pixels\n", image_filename, width, height); // load reference image from image (output) float *h_data_ref = (float*) malloc(size); char* ref_path = cutFindFilePath(ref_filename, argv[0]); if (ref_path == 0) { printf("Unable to find reference file %s\n", ref_filename); exit(EXIT_FAILURE); } cutilCheckError( cutLoadPGMf(ref_path, &h_data_ref, &width, &height)); // allocate device memory for result float* d_data = NULL; cutilSafeCall( hipMalloc( (void**) &d_data, size)); // allocate array and copy image data hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); hipArray* cu_array; cutilSafeCall( hipMallocArray( &cu_array, &channelDesc, width, height )); cutilSafeCall( hipMemcpyToArray( cu_array, 0, 0, h_data, size, hipMemcpyHostToDevice)); // set texture parameters tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates // Bind the array to the texture cutilSafeCall( hipBindTextureToArray( tex, cu_array, channelDesc)); dim3 dimBlock(8, 8, 1); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1); // warmup hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_data, width, height, angle); cutilSafeCall( hipDeviceSynchronize() ); unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); // execute the kernel hipLaunchKernelGGL(( transformKernel), dim3(dimGrid), dim3(dimBlock), 0 , 0, d_data, width, height, angle); // check if kernel execution generated an error cutilCheckMsg("Kernel execution failed"); cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutStopTimer( timer)); printf("Processing time: %f (ms)\n", cutGetTimerValue( timer)); printf("%.2f Mpixels/sec\n", (width*height / (cutGetTimerValue( timer) / 1000.0f)) / 1e6); cutilCheckError( cutDeleteTimer( timer)); // allocate mem for the result on host side float* h_odata = (float*) malloc( size); // copy result from device to host cutilSafeCall( hipMemcpy( h_odata, d_data, size, hipMemcpyDeviceToHost) ); // write result to file char output_filename[1024]; strcpy(output_filename, image_path); strcpy(output_filename + strlen(image_path) - 4, "_out.pgm"); cutilCheckError( cutSavePGMf( output_filename, h_odata, width, height)); printf("Wrote '%s'\n", output_filename); // write regression file if necessary if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutilCheckError( cutWriteFilef( "./data/regression.dat", h_odata, width*height, 0.0)); } else { // We need to reload the data from disk, because it is inverted upon output cutilCheckError( cutLoadPGMf(output_filename, &h_odata, 
&width, &height)); printf("Comparing files\n"); printf("\toutput: <%s>\n", output_filename); printf("\treference: <%s>\n", ref_path); CUTBoolean res = cutComparefe( h_odata, h_data_ref, width*height, MIN_EPSILON_ERROR ); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); } // cleanup memory // free(h_data); // free(h_data_ref); // free(h_odata); cutilSafeCall(hipFree(d_data)); cutilSafeCall(hipFreeArray(cu_array)); cutFree(image_path); cutFree(ref_path); // If we are doing the QAtest, we quit without prompting if( cutCheckCmdLineFlag( argc, (const char**) argv, "qatest") || cutCheckCmdLineFlag( argc, (const char**) argv, "noprompt")) { hipDeviceReset(); exit(0); } hipDeviceReset(); }
d564dd8cc101c7ddc76c1891a3f7b8edcf578546.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* * This sample demonstrates how use texture fetches in CUDA * * This sample takes an input PGM image (image_filename) and generates * an output PGM image (image_filename_out). This CUDA kernel performs * a simple 2D transform (rotation) on the texture coordinates (u,v). */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil_inline.h> // includes, kernels #include <simpleTexture_kernel.cu> char *image_filename = "lena_bw.pgm"; char *ref_filename = "ref_rotated.pgm"; float angle = 0.5f; // angle to rotate image by (in radians) #define MIN_EPSILON_ERROR 5e-3f //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold( float* reference, float* idata, const unsigned int len); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) { cutilDeviceInit(argc, argv); } else { cudaSetDevice( cutGetMaxGflopsDeviceId() ); } // load image from disk float* h_data = NULL; unsigned int width, height; char* image_path = cutFindFilePath(image_filename, argv[0]); if (image_path == 0) { printf("Unable to source file file %s\n", image_filename); exit(EXIT_FAILURE); } cutilCheckError( cutLoadPGMf(image_path, &h_data, &width, &height)); unsigned int size = width * height * sizeof(float); printf("Loaded '%s', %d x %d pixels\n", image_filename, width, height); // load reference image from image (output) float *h_data_ref = (float*) malloc(size); char* ref_path = cutFindFilePath(ref_filename, argv[0]); if (ref_path == 0) { printf("Unable to find reference file %s\n", ref_filename); exit(EXIT_FAILURE); } cutilCheckError( cutLoadPGMf(ref_path, &h_data_ref, &width, &height)); // allocate device memory for result float* d_data = NULL; cutilSafeCall( cudaMalloc( (void**) &d_data, size)); // allocate array and copy image data cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); cudaArray* cu_array; cutilSafeCall( cudaMallocArray( &cu_array, &channelDesc, width, height )); cutilSafeCall( cudaMemcpyToArray( cu_array, 0, 0, h_data, size, cudaMemcpyHostToDevice)); // set texture parameters tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModeLinear; tex.normalized = true; // access with normalized texture coordinates // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray( tex, cu_array, channelDesc)); dim3 dimBlock(8, 8, 1); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1); // warmup transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle); cutilSafeCall( cudaThreadSynchronize() ); unsigned int timer = 0; cutilCheckError( cutCreateTimer( &timer)); cutilCheckError( cutStartTimer( timer)); // execute the kernel transformKernel<<< dimGrid, dimBlock, 0 >>>( d_data, width, height, angle); // check if kernel execution generated an error cutilCheckMsg("Kernel execution failed"); cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutStopTimer( timer)); printf("Processing time: %f (ms)\n", cutGetTimerValue( timer)); printf("%.2f Mpixels/sec\n", (width*height / (cutGetTimerValue( timer) / 1000.0f)) / 1e6); cutilCheckError( cutDeleteTimer( timer)); // allocate mem for the result on host side float* h_odata = (float*) malloc( size); // copy result from device to host cutilSafeCall( cudaMemcpy( h_odata, d_data, size, cudaMemcpyDeviceToHost) ); // write result to file char output_filename[1024]; strcpy(output_filename, image_path); strcpy(output_filename + strlen(image_path) - 4, "_out.pgm"); cutilCheckError( cutSavePGMf( output_filename, h_odata, width, height)); printf("Wrote '%s'\n", output_filename); // write regression file if necessary if( cutCheckCmdLineFlag( argc, (const char**) argv, "regression")) { // write file for regression test cutilCheckError( cutWriteFilef( "./data/regression.dat", h_odata, width*height, 0.0)); } else { // We need to reload the data from disk, because it is inverted upon output cutilCheckError( cutLoadPGMf(output_filename, &h_odata, &width, &height)); printf("Comparing files\n"); 
printf("\toutput: <%s>\n", output_filename); printf("\treference: <%s>\n", ref_path); CUTBoolean res = cutComparefe( h_odata, h_data_ref, width*height, MIN_EPSILON_ERROR ); printf( "Test %s\n", (1 == res) ? "PASSED" : "FAILED"); } // cleanup memory // free(h_data); // free(h_data_ref); // free(h_odata); cutilSafeCall(cudaFree(d_data)); cutilSafeCall(cudaFreeArray(cu_array)); cutFree(image_path); cutFree(ref_path); // If we are doing the QAtest, we quite without prompting if( cutCheckCmdLineFlag( argc, (const char**) argv, "qatest") || cutCheckCmdLineFlag( argc, (const char**) argv, "noprompt")) { cudaThreadExit(); exit(0); } cudaThreadExit(); }
9ebb8c6fe5cb1bbf2e201b6225296857ad79d56d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdint.h> #include <opencv2/opencv.hpp> #define THRESH 127 __global__ void kernel_bgr2gray(uint8_t* input, uint8_t* output){ int idx = blockDim.x * blockIdx.x + threadIdx.x; output[idx] = (uint8_t)((input[3*idx] + input[3*idx+1] + input[3*idx+2])/3); } __global__ void kernel_binary(uint8_t* input, uint8_t* output){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if(input[idx] > THRESH){ output[idx] = 255; }else{ output[idx] = 0; } } int main(int argc, char** argv){ cv::VideoCapture cap; cv::Mat color(480,640, CV_8UC3); cv::Mat gray(480,640, CV_8UC1); cv::Mat binary(480,640, CV_8UC1); cv::namedWindow("window1", CV_WINDOW_AUTOSIZE); cv::namedWindow("window2", CV_WINDOW_AUTOSIZE); uint8_t* device_color; hipMalloc((void**)&device_color, 640*480*3*sizeof(uint8_t)); uint8_t* device_gray; hipMalloc((void**)&device_gray, 640*480*sizeof(uint8_t)); uint8_t* device_binary; hipMalloc((void**)&device_binary, 640*480*sizeof(uint8_t)); cap.open(0); while(1){ cap >> color; hipMemcpy(device_color, color.data, 640*480*3*sizeof(uint8_t), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_bgr2gray) , dim3(640),dim3(480), 0, 0, device_color, device_gray); hipLaunchKernelGGL(( kernel_binary) , dim3(640),dim3(480), 0, 0, device_gray, device_binary); hipMemcpy(gray.data, device_gray, 640*480*sizeof(uint8_t), hipMemcpyDeviceToHost); hipMemcpy(binary.data, device_binary, 640*480*sizeof(uint8_t), hipMemcpyDeviceToHost); cv::imshow("window1", gray); cv::waitKey(1); cv::imshow("window2", binary); cv::waitKey(1); // printf("%d %d\n", frame.cols, frame.rows); } hipFree(device_color); hipFree(device_gray); hipFree(device_binary); return 0; }
9ebb8c6fe5cb1bbf2e201b6225296857ad79d56d.cu
#include <cuda.h> #include <stdio.h> #include <stdint.h> #include <opencv2/opencv.hpp> #define THRESH 127 __global__ void kernel_bgr2gray(uint8_t* input, uint8_t* output){ int idx = blockDim.x * blockIdx.x + threadIdx.x; output[idx] = (uint8_t)((input[3*idx] + input[3*idx+1] + input[3*idx+2])/3); } __global__ void kernel_binary(uint8_t* input, uint8_t* output){ int idx = blockDim.x * blockIdx.x + threadIdx.x; if(input[idx] > THRESH){ output[idx] = 255; }else{ output[idx] = 0; } } int main(int argc, char** argv){ cv::VideoCapture cap; cv::Mat color(480,640, CV_8UC3); cv::Mat gray(480,640, CV_8UC1); cv::Mat binary(480,640, CV_8UC1); cv::namedWindow("window1", CV_WINDOW_AUTOSIZE); cv::namedWindow("window2", CV_WINDOW_AUTOSIZE); uint8_t* device_color; cudaMalloc((void**)&device_color, 640*480*3*sizeof(uint8_t)); uint8_t* device_gray; cudaMalloc((void**)&device_gray, 640*480*sizeof(uint8_t)); uint8_t* device_binary; cudaMalloc((void**)&device_binary, 640*480*sizeof(uint8_t)); cap.open(0); while(1){ cap >> color; cudaMemcpy(device_color, color.data, 640*480*3*sizeof(uint8_t), cudaMemcpyHostToDevice); kernel_bgr2gray <<<640,480>>> (device_color, device_gray); kernel_binary <<<640,480>>> (device_gray, device_binary); cudaMemcpy(gray.data, device_gray, 640*480*sizeof(uint8_t), cudaMemcpyDeviceToHost); cudaMemcpy(binary.data, device_binary, 640*480*sizeof(uint8_t), cudaMemcpyDeviceToHost); cv::imshow("window1", gray); cv::waitKey(1); cv::imshow("window2", binary); cv::waitKey(1); // printf("%d %d\n", frame.cols, frame.rows); } cudaFree(device_color); cudaFree(device_gray); cudaFree(device_binary); return 0; }
b4276b538100c2574a7ffab04fa74bcf937b1e39.hip
// !!! This is a file automatically generated by hipify!!! /* * test.cpp * * Created on: 2017725 * Author: houyx */ #include "briskCode/BriskScaleSpace.cuh" #include "opencv2/cudafeatures2d.hpp" #include <opencv2/features2d/features2d.hpp> #include <opencv/cv.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/core/core.hpp> #include <vector> using namespace std; void poutfloat2(float2* m, int size, std::string info) { float2 temp; memset(&temp, 0, sizeof(float2)); std::cout << info << std::endl; for (int i = 0; i < size; i++) { CUDA_CHECK_RETURN( hipMemcpy(&temp, &m[i], sizeof(float2), hipMemcpyDeviceToHost)); std::cout << "====" << info << "==== "<<i<<": (" << temp.x << "," << temp.y << ")"; std::cout << std::endl; } std::cout << "******************finish*******************" << std::endl; } void copyToKeyPoint(vector<cv::KeyPoint>& keypoints1, int size, float2* keypoints, float* kpSize, float* kpScore) { keypoints1.clear(); float2 kptemp; float kpsizetemp; float kpscoretemp; for( int i = 0; i < size; i ++ ) { CUDA_CHECK_RETURN(hipMemcpy(&kptemp, &keypoints[i], sizeof(float2), hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(&kpsizetemp, &kpSize[i], sizeof(float), hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipMemcpy(&kpscoretemp, &kpScore[i], sizeof(float), hipMemcpyDeviceToHost)); keypoints1.push_back(cv::KeyPoint(float(kptemp.x), float(kptemp.y), kpsizetemp, -1, kpscoretemp, 0)); } } void copyDescritpor( PtrStepSzb desGpu, cv::Mat& descriptor, int size, int singleSize ) { descriptor.create(size,singleSize,CV_8U); for( int i = 0; i < size; i++ ) { CUDA_CHECK_RETURN(hipMemcpy(descriptor.ptr<unsigned char>(i), &(desGpu.data[i*singleSize]), sizeof(unsigned char)*singleSize, hipMemcpyDeviceToHost)); } //descritpor.create((size, singleSize, CV_8U); } void copyDescritporDebug( PtrStepSzb desGpu, cv::Mat& descriptor, int size, int singleSize ) { descriptor.create(size,singleSize,CV_8U); CUDA_CHECK_RETURN(hipMemcpy(descriptor.data, desGpu.data, sizeof(unsigned char)*singleSize*size, hipMemcpyDeviceToHost)); //descritpor.create((size, singleSize, CV_8U); } //todo:step int main() { // cv::Mat testImg = cv::imread("data/test2.jpg"); if (!testImg.data) { cout << "load data failed" << endl; } cv::Mat testImg11 = cv::imread("data/test1.jpg"); /* cv::Mat testResize; testResize.create(testImg.rows / 2, testImg.cols / 2, CV_8U); cv::resize(testImg,testResize,testResize.size(),0,0,cv::INTER_AREA);*/ // cv::Mat testRotate; cv::transpose(testImg,testRotate); // cv::Mat testImgGray; cv::cvtColor(testImg, testImgGray, CV_BGR2GRAY); cv::Mat testImgGray1; cv::cvtColor(testRotate, testImgGray1, CV_BGR2GRAY); //GPU cv::cuda::GpuMat dstImage; cv::cuda::GpuMat dstImage1; unsigned char * dstImagedata,*dstImagedata1; hipMalloc(&dstImagedata, testImgGray.rows * testImgGray.cols); hipMalloc(&dstImagedata1, testImgGray1.rows * testImgGray1.cols); for (int i = 0; i < testImgGray.rows; i++) { hipMemcpy(dstImagedata + i * testImgGray.cols, testImgGray.data + i * testImgGray.step, sizeof(unsigned char) * testImgGray.cols, hipMemcpyHostToDevice); } for (int i = 0; i < testImgGray1.rows; i++) { hipMemcpy(dstImagedata1 + i * testImgGray1.cols, testImgGray1.data + i * testImgGray1.step, sizeof(unsigned char) * testImgGray1.cols, hipMemcpyHostToDevice); } dstImage.data = dstImagedata; dstImage.cols = testImgGray.cols; dstImage.step = testImgGray.cols; dstImage.rows = testImgGray.rows; dstImage1.data = dstImagedata1; dstImage1.cols = testImgGray1.cols; dstImage1.step = testImgGray1.cols; dstImage1.rows = 
testImgGray1.rows; PtrStepSzb imageIn(dstImage.rows, dstImage.cols, dstImage.data, dstImage.step); PtrStepSzb imageIn1(dstImage1.rows, dstImage1.cols, dstImage1.data, dstImage1.step); //GPU cv::Mat retestCpu(testImgGray.rows, testImgGray.cols, CV_8UC1); dstImage.download(retestCpu); cv::Mat retestCpu1(testImgGray1.rows, testImgGray1.cols, CV_8UC1); dstImage1.download(retestCpu1); cv::imshow("retestCpu", retestCpu); cv::imshow("retestCpu1", retestCpu1); cv::waitKey(); cout << "load image done!!" << endl; //brisk BRISK_Impl a(true,dstImage.rows, dstImage.cols); int2 size; hipEvent_t start, stop; float elapsedTime = 0.0; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for( int i = 0; i < 10000; i ++ ) { size = a.detectAndCompute(imageIn, a.keypointsG, a.kpSizeG, a.kpScoreG,a.descriptorsG,false); if(i%50==0) cout << "caled: " << i << endl; } size = a.detectAndCompute(imageIn, a.keypointsG, a.kpSizeG, a.kpScoreG,a.descriptorsG,false); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); cout << "time elapsed: " << elapsedTime << endl; hipEventDestroy(start); hipEventDestroy(stop); cout << "a finished" << endl; BRISK_Impl a1(true,dstImage1.rows, dstImage1.cols); int2 size1 = a1.detectAndCompute(imageIn1, a1.keypointsG, a1.kpSizeG, a1.kpScoreG,a1.descriptorsG, false); cout << " --" << size.x << " --" << size.y << endl; cout << " --" << size1.x << " --" << size1.y << endl; //GPUcopyopencv vector<cv::KeyPoint> keypoints; copyToKeyPoint(keypoints, size.x, a.keypointsG, a.kpSizeG, a.kpScoreG); vector<cv::KeyPoint> keypoints1; copyToKeyPoint(keypoints1, size1.x, a1.keypointsG, a1.kpSizeG, a1.kpScoreG); cv::Mat descriptors; copyDescritpor( a.descriptorsG, descriptors, size.y, a.strings_ ); cv::Mat descriptors1; copyDescritpor( a1.descriptorsG, descriptors1, size1.y, a1.strings_ ); /* for( int i = 0; i < size.y; i ++ ) { cout << "des: " << i << "----"; for( int j = 0; j < a.strings_; j ++ ) { cout << (int)(descriptors.at<uchar>(i,j))<<" "; } cout << endl; }*/ // cv::Mat result1; int drawmode = cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS; cv::drawKeypoints(testImgGray, keypoints, result1, cv::Scalar::all(-1), drawmode); cv::Mat result2; cv::drawKeypoints(testImgGray1, keypoints1, result2, cv::Scalar::all(-1), drawmode); cv::imshow("result1", result1); cv::imshow("result2", result2); cv::waitKey(); //match cv::BFMatcher matcher(cv::NORM_HAMMING); vector<cv::DMatch> matches; matcher.match(descriptors, descriptors1, matches); //keypoint--(x,y) = (-1,-1) descriptors int tempcount = 0; for( int i = 0; i < size.y; i ++ ) { if( keypoints[i].pt.x == -1 || keypoints[i].pt.y == -1 ) { keypoints.erase(keypoints.begin() + i); //cout << "in delete keypoints: " << i << " " << ++ tempcount << " points deleted" << endl; if(tempcount > size.y) { exit(1); } i --; } } tempcount = 0; for( int i = 0; i < size1.y; i ++ ) { if( keypoints1[i].pt.x == -1 || keypoints1[i].pt.y == -1 ) { keypoints1.erase(keypoints1.begin() + i); //cout << "in delete keypoints1: " << i << " " << ++ tempcount << " points deleted" << endl; if(tempcount > size1.y) { exit(1); } i --; } } cv::Mat img_match; cv::drawMatches(testImgGray, keypoints, testImgGray1, keypoints1, matches, img_match); cout<<"number of matched points: "<<matches.size()<<endl; cv::imshow("matches",img_match); cv::waitKey(0); cout << "end!!" << endl; return 0; }
b4276b538100c2574a7ffab04fa74bcf937b1e39.cu
/* * test.cpp * * Created on: 2017年7月25日 * Author: houyx */ #include "briskCode/BriskScaleSpace.cuh" #include "opencv2/cudafeatures2d.hpp" #include <opencv2/features2d/features2d.hpp> #include <opencv/cv.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/core/core.hpp> #include <vector> using namespace std; void poutfloat2(float2* m, int size, std::string info) { float2 temp; memset(&temp, 0, sizeof(float2)); std::cout << info << std::endl; for (int i = 0; i < size; i++) { CUDA_CHECK_RETURN( cudaMemcpy(&temp, &m[i], sizeof(float2), cudaMemcpyDeviceToHost)); std::cout << "====" << info << "==== "<<i<<": (" << temp.x << "," << temp.y << ")"; std::cout << std::endl; } std::cout << "******************finish*******************" << std::endl; } void copyToKeyPoint(vector<cv::KeyPoint>& keypoints1, int size, float2* keypoints, float* kpSize, float* kpScore) { keypoints1.clear(); float2 kptemp; float kpsizetemp; float kpscoretemp; for( int i = 0; i < size; i ++ ) { CUDA_CHECK_RETURN(cudaMemcpy(&kptemp, &keypoints[i], sizeof(float2), cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(&kpsizetemp, &kpSize[i], sizeof(float), cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaMemcpy(&kpscoretemp, &kpScore[i], sizeof(float), cudaMemcpyDeviceToHost)); keypoints1.push_back(cv::KeyPoint(float(kptemp.x), float(kptemp.y), kpsizetemp, -1, kpscoretemp, 0)); } } void copyDescritpor( PtrStepSzb desGpu, cv::Mat& descriptor, int size, int singleSize ) { descriptor.create(size,singleSize,CV_8U); for( int i = 0; i < size; i++ ) { CUDA_CHECK_RETURN(cudaMemcpy(descriptor.ptr<unsigned char>(i), &(desGpu.data[i*singleSize]), sizeof(unsigned char)*singleSize, cudaMemcpyDeviceToHost)); } //descritpor.create((size, singleSize, CV_8U); } void copyDescritporDebug( PtrStepSzb desGpu, cv::Mat& descriptor, int size, int singleSize ) { descriptor.create(size,singleSize,CV_8U); CUDA_CHECK_RETURN(cudaMemcpy(descriptor.data, desGpu.data, sizeof(unsigned char)*singleSize*size, cudaMemcpyDeviceToHost)); //descritpor.create((size, singleSize, CV_8U); } //todo:需要记住每次构造后要检测的图像的step必须相同 int main() { //读取图片 cv::Mat testImg = cv::imread("data/test2.jpg"); if (!testImg.data) { cout << "load data failed" << endl; } cv::Mat testImg11 = cv::imread("data/test1.jpg"); /* cv::Mat testResize; testResize.create(testImg.rows / 2, testImg.cols / 2, CV_8U); cv::resize(testImg,testResize,testResize.size(),0,0,cv::INTER_AREA);*/ //得到旋转图片 cv::Mat testRotate; cv::transpose(testImg,testRotate); //得到灰度图 cv::Mat testImgGray; cv::cvtColor(testImg, testImgGray, CV_BGR2GRAY); cv::Mat testImgGray1; cv::cvtColor(testRotate, testImgGray1, CV_BGR2GRAY); //将图片上传到GPU cv::cuda::GpuMat dstImage; cv::cuda::GpuMat dstImage1; unsigned char * dstImagedata,*dstImagedata1; cudaMalloc(&dstImagedata, testImgGray.rows * testImgGray.cols); cudaMalloc(&dstImagedata1, testImgGray1.rows * testImgGray1.cols); for (int i = 0; i < testImgGray.rows; i++) { cudaMemcpy(dstImagedata + i * testImgGray.cols, testImgGray.data + i * testImgGray.step, sizeof(unsigned char) * testImgGray.cols, cudaMemcpyHostToDevice); } for (int i = 0; i < testImgGray1.rows; i++) { cudaMemcpy(dstImagedata1 + i * testImgGray1.cols, testImgGray1.data + i * testImgGray1.step, sizeof(unsigned char) * testImgGray1.cols, cudaMemcpyHostToDevice); } dstImage.data = dstImagedata; dstImage.cols = testImgGray.cols; dstImage.step = testImgGray.cols; dstImage.rows = testImgGray.rows; dstImage1.data = dstImagedata1; dstImage1.cols = testImgGray1.cols; dstImage1.step = testImgGray1.cols; dstImage1.rows = 
testImgGray1.rows; PtrStepSzb imageIn(dstImage.rows, dstImage.cols, dstImage.data, dstImage.step); PtrStepSzb imageIn1(dstImage1.rows, dstImage1.cols, dstImage1.data, dstImage1.step); //把GPU的图片读出来显示,确保无误 cv::Mat retestCpu(testImgGray.rows, testImgGray.cols, CV_8UC1); dstImage.download(retestCpu); cv::Mat retestCpu1(testImgGray1.rows, testImgGray1.cols, CV_8UC1); dstImage1.download(retestCpu1); cv::imshow("retestCpu", retestCpu); cv::imshow("retestCpu1", retestCpu1); cv::waitKey(); cout << "load image done!!" << endl; //brisk计算特征点 BRISK_Impl a(true,dstImage.rows, dstImage.cols); int2 size; cudaEvent_t start, stop; float elapsedTime = 0.0; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for( int i = 0; i < 10000; i ++ ) { size = a.detectAndCompute(imageIn, a.keypointsG, a.kpSizeG, a.kpScoreG,a.descriptorsG,false); if(i%50==0) cout << "caled: " << i << endl; } size = a.detectAndCompute(imageIn, a.keypointsG, a.kpSizeG, a.kpScoreG,a.descriptorsG,false); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); cout << "time elapsed: " << elapsedTime << endl; cudaEventDestroy(start); cudaEventDestroy(stop); cout << "a finished" << endl; BRISK_Impl a1(true,dstImage1.rows, dstImage1.cols); int2 size1 = a1.detectAndCompute(imageIn1, a1.keypointsG, a1.kpSizeG, a1.kpScoreG,a1.descriptorsG, false); cout << "原始图特征点数: 去边角前--" << size.x << " 去掉边角后--" << size.y << endl; cout << "旋转图特征点数: 去边角前--" << size1.x << " 去掉边角后--" << size1.y << endl; //把GPU上的特征点copy灰opencv的结构 vector<cv::KeyPoint> keypoints; copyToKeyPoint(keypoints, size.x, a.keypointsG, a.kpSizeG, a.kpScoreG); vector<cv::KeyPoint> keypoints1; copyToKeyPoint(keypoints1, size1.x, a1.keypointsG, a1.kpSizeG, a1.kpScoreG); cv::Mat descriptors; copyDescritpor( a.descriptorsG, descriptors, size.y, a.strings_ ); cv::Mat descriptors1; copyDescritpor( a1.descriptorsG, descriptors1, size1.y, a1.strings_ ); /* for( int i = 0; i < size.y; i ++ ) { cout << "des: " << i << "----"; for( int j = 0; j < a.strings_; j ++ ) { cout << (int)(descriptors.at<uchar>(i,j))<<" "; } cout << endl; }*/ //画图显示 cv::Mat result1; int drawmode = cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS; cv::drawKeypoints(testImgGray, keypoints, result1, cv::Scalar::all(-1), drawmode); cv::Mat result2; cv::drawKeypoints(testImgGray1, keypoints1, result2, cv::Scalar::all(-1), drawmode); cv::imshow("result1", result1); cv::imshow("result2", result2); cv::waitKey(); //match并画图显示 cv::BFMatcher matcher(cv::NORM_HAMMING); vector<cv::DMatch> matches; matcher.match(descriptors, descriptors1, matches); //在keypoint中删除标记为边角的点--(x,y) = (-1,-1) 注:这些点其实在descriptors中不存在 int tempcount = 0; for( int i = 0; i < size.y; i ++ ) { if( keypoints[i].pt.x == -1 || keypoints[i].pt.y == -1 ) { keypoints.erase(keypoints.begin() + i); //cout << "in delete keypoints: " << i << " " << ++ tempcount << " points deleted" << endl; if(tempcount > size.y) { exit(1); } i --; } } tempcount = 0; for( int i = 0; i < size1.y; i ++ ) { if( keypoints1[i].pt.x == -1 || keypoints1[i].pt.y == -1 ) { keypoints1.erase(keypoints1.begin() + i); //cout << "in delete keypoints1: " << i << " " << ++ tempcount << " points deleted" << endl; if(tempcount > size1.y) { exit(1); } i --; } } cv::Mat img_match; cv::drawMatches(testImgGray, keypoints, testImgGray1, keypoints1, matches, img_match); cout<<"number of matched points: "<<matches.size()<<endl; cv::imshow("matches",img_match); cv::waitKey(0); cout << "end!!" << endl; return 0; }
2a6ae8fabfa6b4548ae0aaf14c3fbc23918ab5ae.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <chrono> #include <cmath> #include <iostream> #include <numeric> #define GLM_ENABLE_EXPERIMENTAL #include <CLI/CLI11.hpp> #include <SDL2/SDL.h> #include <hip/hip_runtime.h> #include "common.hpp" constexpr float PI = 3.1415926535897932f; using namespace std; __device__ __host__ uint8_t computePixel(int x, int y, float period) { return 255 * (sinf(x*2.0f*PI/ period) + 1.0f) * (sinf(y*2.0f*PI/ period) + 1.0f) / 4.0f; } __global__ void computeDotsCuda(uint8_t* pixels, int2 dim, float period) { __shared__ uint8_t shared[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * dim.x; shared[threadIdx.x][threadIdx.y] = computePixel(x, y, period); __syncthreads(); pixels[offset * 3 + 2] = shared[15 - threadIdx.x][15 - threadIdx.y]; } __global__ void computeDotsCudaNoSync(uint8_t* pixels, int2 dim, float period) { __shared__ uint8_t shared[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * dim.x; shared[threadIdx.x][threadIdx.y] = computePixel(x, y, period); pixels[offset * 3 + 2] = shared[15 - threadIdx.x][15 - threadIdx.y]; } void computeDotsCpu(uint8_t* pixels, int2 dim, float period, bool omp) { #pragma omp parallel for if(omp) for (int y = 0; y < dim.y; ++y) { #pragma omp parallel for if(omp) for (int x = 0; x < dim.x; ++x) { uint64_t offset = x + y * dim.x; uint8_t value = computePixel(x, y, period); pixels[offset * 3 + 2] = value; } } } enum class Mode { CPU, OMP, CUDA, CUDA_NOSYNC }; int main(int argc, char *argv[]) { CLI::App app("Stuff"); dim3 threadsPerBlock { 16, 16 }; int2 dim { 640, 480 }; float period = 128.0; string modeString = "CPU"; Mode mode = Mode::CPU; try { app.add_option("--period", period, "Dot period", true); app.add_option("--width", dim.x, "Image width in pixels", true); app.add_option("--height", dim.y, "Image height in pixels", true); app.add_option("--threadx", threadsPerBlock.x, "Horizontal threads per block", true); app.add_option("--thready", threadsPerBlock.y, "Vertical threads per block", true); app.add_flag("-n", "Don't initialize memory with zeroes"); app.add_option("--mode", modeString, "Rendering mode (CPU, OMP, CUDA, CUDA_NOSYNC)", true)->check([](const string& m) { return (m == "CPU" || m == "OMP" || m == "CUDA" || m == "CUDA_NOSYNC"); }); app.parse(argc, argv); if (modeString == "CPU") { mode = Mode::CPU; } else if (modeString == "OMP") { mode = Mode::OMP; } else if (modeString == "CUDA") { mode = Mode::CUDA; } else if (modeString == "CUDA_NOSYNC") { mode = Mode::CUDA_NOSYNC; } } catch (CLI::Error &e) { return app.exit(e); } dim3 blocks(dim.x / threadsPerBlock.x, dim.y / threadsPerBlock.y); // Initialization handleSdl(SDL_Init(SDL_INIT_VIDEO) == 0); SdlWindow window("", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, dim.x, dim.y, SDL_WINDOW_HIDDEN); vector<uint8_t> pixels; if (app.count("-n") == 0) { // If we want to zero the memory initially... 
pixels.resize(dim.x * dim.y * 3, 0); } else { pixels.reserve(dim.x * dim.y * 3); } float duration = 0.0; if (mode == Mode::CPU || mode == Mode::OMP) { // CPU version auto cpuStart = chrono::high_resolution_clock::now(); { computeDotsCpu(pixels.data(), dim, period, mode == Mode::OMP); } auto cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0; } else if (mode == Mode::CUDA || mode == Mode::CUDA_NOSYNC) { CudaEvent cudaStart; CudaEvent cudaFinish; // CUDA version uint8_t *cudaResults = nullptr; handle(hipMalloc(&cudaResults, dim.x * dim.y * 3 * sizeof(*cudaResults))); if (app.count("-n") == 0) { handle(hipMemset(cudaResults, 0, dim.x * dim.y * 3)); } handle(hipEventRecord(cudaStart.event, 0)); { if (mode == Mode::CUDA) { hipLaunchKernelGGL(( computeDotsCuda), dim3(blocks), dim3(threadsPerBlock), 0, 0, cudaResults, dim, period); } else if (mode == Mode::CUDA_NOSYNC) { hipLaunchKernelGGL(( computeDotsCudaNoSync), dim3(blocks), dim3(threadsPerBlock), 0, 0, cudaResults, dim, period); } handle(hipMemcpy(pixels.data(), cudaResults, pixels.size(), hipMemcpyDeviceToHost)); } handle(hipEventRecord(cudaFinish.event, 0)); handle(hipEventSynchronize(cudaFinish.event)); handle( hipEventElapsedTime(&duration, cudaStart.event, cudaFinish.event)); handle(hipFree(cudaResults)); } { SDL_ShowWindow(window.window); handleSdl(SDL_UpdateTexture(window.texture, nullptr, pixels.data(), 3 * dim.x) == 0); handleSdl(SDL_RenderClear(window.renderer) == 0); handleSdl(SDL_RenderCopy(window.renderer, window.texture, nullptr, nullptr) == 0); SDL_RenderPresent(window.renderer); ostringstream windowTitle; windowTitle << "Blue Dots (" << modeString << ") " << duration << "ms"; SDL_SetWindowTitle(window.window, windowTitle.str().c_str()); } bool quit = false; SDL_Event event; while (!quit) { while (SDL_PollEvent(&event) != 0) { if (isWindowClosed(event)) { quit = true; } } SDL_Delay(33); } SDL_Quit(); return 0; }
2a6ae8fabfa6b4548ae0aaf14c3fbc23918ab5ae.cu
#include <cassert> #include <chrono> #include <cmath> #include <iostream> #include <numeric> #define GLM_ENABLE_EXPERIMENTAL #include <CLI/CLI11.hpp> #include <SDL2/SDL.h> #include <cuda_runtime.h> #include "common.hpp" constexpr float PI = 3.1415926535897932f; using namespace std; __device__ __host__ uint8_t computePixel(int x, int y, float period) { return 255 * (sinf(x*2.0f*PI/ period) + 1.0f) * (sinf(y*2.0f*PI/ period) + 1.0f) / 4.0f; } __global__ void computeDotsCuda(uint8_t* pixels, int2 dim, float period) { __shared__ uint8_t shared[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * dim.x; shared[threadIdx.x][threadIdx.y] = computePixel(x, y, period); __syncthreads(); pixels[offset * 3 + 2] = shared[15 - threadIdx.x][15 - threadIdx.y]; } __global__ void computeDotsCudaNoSync(uint8_t* pixels, int2 dim, float period) { __shared__ uint8_t shared[16][16]; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * dim.x; shared[threadIdx.x][threadIdx.y] = computePixel(x, y, period); pixels[offset * 3 + 2] = shared[15 - threadIdx.x][15 - threadIdx.y]; } void computeDotsCpu(uint8_t* pixels, int2 dim, float period, bool omp) { #pragma omp parallel for if(omp) for (int y = 0; y < dim.y; ++y) { #pragma omp parallel for if(omp) for (int x = 0; x < dim.x; ++x) { uint64_t offset = x + y * dim.x; uint8_t value = computePixel(x, y, period); pixels[offset * 3 + 2] = value; } } } enum class Mode { CPU, OMP, CUDA, CUDA_NOSYNC }; int main(int argc, char *argv[]) { CLI::App app("Stuff"); dim3 threadsPerBlock { 16, 16 }; int2 dim { 640, 480 }; float period = 128.0; string modeString = "CPU"; Mode mode = Mode::CPU; try { app.add_option("--period", period, "Dot period", true); app.add_option("--width", dim.x, "Image width in pixels", true); app.add_option("--height", dim.y, "Image height in pixels", true); app.add_option("--threadx", threadsPerBlock.x, "Horizontal threads per block", true); app.add_option("--thready", threadsPerBlock.y, "Vertical threads per block", true); app.add_flag("-n", "Don't initialize memory with zeroes"); app.add_option("--mode", modeString, "Rendering mode (CPU, OMP, CUDA, CUDA_NOSYNC)", true)->check([](const string& m) { return (m == "CPU" || m == "OMP" || m == "CUDA" || m == "CUDA_NOSYNC"); }); app.parse(argc, argv); if (modeString == "CPU") { mode = Mode::CPU; } else if (modeString == "OMP") { mode = Mode::OMP; } else if (modeString == "CUDA") { mode = Mode::CUDA; } else if (modeString == "CUDA_NOSYNC") { mode = Mode::CUDA_NOSYNC; } } catch (CLI::Error &e) { return app.exit(e); } dim3 blocks(dim.x / threadsPerBlock.x, dim.y / threadsPerBlock.y); // Initialization handleSdl(SDL_Init(SDL_INIT_VIDEO) == 0); SdlWindow window("", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, dim.x, dim.y, SDL_WINDOW_HIDDEN); vector<uint8_t> pixels; if (app.count("-n") == 0) { // If we want to zero the memory initially... 
pixels.resize(dim.x * dim.y * 3, 0); } else { pixels.reserve(dim.x * dim.y * 3); } float duration = 0.0; if (mode == Mode::CPU || mode == Mode::OMP) { // CPU version auto cpuStart = chrono::high_resolution_clock::now(); { computeDotsCpu(pixels.data(), dim, period, mode == Mode::OMP); } auto cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0; } else if (mode == Mode::CUDA || mode == Mode::CUDA_NOSYNC) { CudaEvent cudaStart; CudaEvent cudaFinish; // CUDA version uint8_t *cudaResults = nullptr; handle(cudaMalloc(&cudaResults, dim.x * dim.y * 3 * sizeof(*cudaResults))); if (app.count("-n") == 0) { handle(cudaMemset(cudaResults, 0, dim.x * dim.y * 3)); } handle(cudaEventRecord(cudaStart.event, 0)); { if (mode == Mode::CUDA) { computeDotsCuda<<<blocks, threadsPerBlock>>>(cudaResults, dim, period); } else if (mode == Mode::CUDA_NOSYNC) { computeDotsCudaNoSync<<<blocks, threadsPerBlock>>>(cudaResults, dim, period); } handle(cudaMemcpy(pixels.data(), cudaResults, pixels.size(), cudaMemcpyDeviceToHost)); } handle(cudaEventRecord(cudaFinish.event, 0)); handle(cudaEventSynchronize(cudaFinish.event)); handle( cudaEventElapsedTime(&duration, cudaStart.event, cudaFinish.event)); handle(cudaFree(cudaResults)); } { SDL_ShowWindow(window.window); handleSdl(SDL_UpdateTexture(window.texture, nullptr, pixels.data(), 3 * dim.x) == 0); handleSdl(SDL_RenderClear(window.renderer) == 0); handleSdl(SDL_RenderCopy(window.renderer, window.texture, nullptr, nullptr) == 0); SDL_RenderPresent(window.renderer); ostringstream windowTitle; windowTitle << "Blue Dots (" << modeString << ") " << duration << "ms"; SDL_SetWindowTitle(window.window, windowTitle.str().c_str()); } bool quit = false; SDL_Event event; while (!quit) { while (SDL_PollEvent(&event) != 0) { if (isWindowClosed(event)) { quit = true; } } SDL_Delay(33); } SDL_Quit(); return 0; }
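The program above leans on a few helpers from "common.hpp" (handle, handleSdl, CudaEvent, SdlWindow, isWindowClosed) that are not part of this file. The sketch below shows one plausible shape for the two CUDA-side helpers it uses for error handling and event timing; the names match the calls above, but the real header is not shown here, so this is an assumption, not the project's actual implementation.

// Hypothetical sketch of the CUDA helpers this program expects from common.hpp.
// The actual header is not shown; names and behaviour here are assumptions.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if a CUDA runtime call fails.
inline void handle(cudaError_t status) {
    if (status != cudaSuccess) {
        std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(status));
        std::exit(EXIT_FAILURE);
    }
}

// RAII wrapper so cudaEventCreate/cudaEventDestroy are always paired,
// matching the cudaStart.event / cudaFinish.event usage above.
struct CudaEvent {
    cudaEvent_t event;
    CudaEvent()  { handle(cudaEventCreate(&event)); }
    ~CudaEvent() { cudaEventDestroy(event); }
};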
64c76b6d6058ab0d4ac9834a7dcc59165dc6d684.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <iostream> #include <vector> #include <thread> #include "gpu_kernel.cuh" #include "tsetlin_random_wheel.cuh" using namespace std; GPUKernel::GPUKernel(){ int available_gpus; // Query for amount of available GPUs if(hipGetDeviceCount(&available_gpus) != hipSuccess) { printf("KernelError: Unable to query for the amount of GPUs available! Check that the CUDA driver is properly installed.\n"); } if(available_gpus == 0) { printf("KernelError: The CUDA driver could not find any compatible GPUs.\n"); } }; void GPUKernel::enable_ssl_s(double delta_s) { // Store the delta for s this->delta_s = delta_s; // Enable the SSL for s flag this->ssl_s_enabled = true; } void GPUKernel::enable_gpu(unsigned int gpu_id) { // Try to select the correct GPU this->select_gpu(gpu_id); // Create a new configuration struct this->enabled_gpus.push_back(gpu_id); } void GPUKernel::remove_gpu(unsigned int gpu_id) { for(unsigned int index = 0; index < this->enabled_gpus.size(); index++) { if(this->enabled_gpus[index] == gpu_id) { this->enabled_gpus.erase(this->enabled_gpus.begin() + (index - 1)); break; } } } void GPUKernel::load_model(const unsigned int* model, unsigned int classes, unsigned int clauses, unsigned int automatas, unsigned int states) { // Store the configuration to the kernel class this->classes_amount = classes; this->clauses_amount = clauses; this->automatas_amount = automatas; this->features_amount = static_cast<unsigned int>(automatas / 2); this->states_amount = states; // Check if an model has already been loaded, if so, deallocate its memory if(this->model != nullptr) { hipFree(&this->model); this->model = nullptr; } // Attempt to allocate unified memory for the model if(hipMallocManaged(&this->model, sizeof(unsigned int) * classes * clauses * automatas, hipMemAttachGlobal) != hipSuccess) { printf("load_model(): Unable to allocate unified memory for the model\n"); return; } // Copy the data to GPU memory hipMemcpy(this->model, model, sizeof(unsigned int) * classes * clauses * automatas, hipMemcpyHostToHost); } void GPUKernel::load_training_data(const unsigned int* x_train, const unsigned int* y_train, unsigned int train_samples_n) { // First cleanup if previous training data has been loaded if(this->x_train != nullptr) { hipFree(&this->x_train); this->x_train = nullptr; } if(this->y_train != nullptr) { delete [] this->y_train; this->y_train = nullptr; } // Attempt to allocate unified memory for the x_data and y_data if(hipMallocManaged(&this->x_train, sizeof(unsigned int) * train_samples_n * features_amount, hipMemAttachGlobal) != hipSuccess) { printf("load_training_data(): Unable to allocate unified memory for the x_train data\n"); return; } // Allocate memory for y_train this->y_train = new unsigned int[train_samples_n]; // Copy the x train data to GPU memory hipMemcpy(this->x_train, x_train, sizeof(unsigned int) * train_samples_n * features_amount, hipMemcpyHostToHost); // Copy the y train data to Host memory memcpy(this->y_train, y_train, sizeof(unsigned int) * train_samples_n); // Store how many samples that have been loaded this->samples_train_n = train_samples_n; } void GPUKernel::load_validation_data(const unsigned int* x_val, const unsigned int* y_val, unsigned int val_samples_n) { // First cleanup if previous training data has been loaded if(this->x_val != nullptr) { hipFree(&this->x_val); 
this->x_val = nullptr; } if(this->y_val != nullptr) { delete [] this->y_val; this->y_val = nullptr; } // Attempt to allocate unified memory for the x_data and y_data if(hipMallocManaged(&this->x_val, sizeof(unsigned int) * val_samples_n * this->features_amount, hipMemAttachGlobal) != hipSuccess) { printf("load_validation_data(): Unable to allocate unified memory for the x_val data\n"); return; } // Allocate memory for y_train this->y_val = new unsigned int[val_samples_n]; // Copy the x val data to GPU memory hipMemcpy(this->x_val, x_val, sizeof(unsigned int) * val_samples_n * features_amount, hipMemcpyHostToHost); // Copy the y val data to Host memory memcpy(this->y_val, y_val, sizeof(unsigned int) * val_samples_n); // Store how many samples that have been loaded this->samples_val_n = val_samples_n; } void GPUKernel::fit(int epochs, int batches, bool validation, int threshold, float s, bool feedback, bool print_model_after_epoch) { // Check if we have enabled any GPUs if(this->enabled_gpus.size() == 0) { printf("fit(): No GPUs has been enabled. Please enable some gpu's before trying to fit the data\n"); return; } // Declare an array for the worker threads std::thread* worker_threads = new std::thread[this->classes_amount]; // Create a new random generator TsetlinRandomWheel* random_generator = new TsetlinRandomWheel(rand(), this->classes_amount, 65565); // Create time objects float* training_times = new float[this->classes_amount]; // Create an array that will hold the accuracy for each epochs double* accuracy_epochs = (double*) malloc(sizeof(double) * epochs); float s_tempromary = s; // Declare filestream for writing the results std::ofstream result_stream; // If feedback, open a result file if(feedback == true) { // Open the filestream result_stream.open("training_result.csv"); // Write the header of the file result_stream << "epoch;accuracy;s;s_temp;time\n"; } // Start looping the epochs for(int epoch = 1; epoch <= epochs; epoch++) { // Print feedback if(feedback == true){ printf("Epoch %d \n", epoch); } // Check if we are using ssl on the S value if(validation == true && this->ssl_s_enabled == true) { // Check if we are on a epoch that is divisble by 3 (then we are to perform a calculation for the new S value) if((epoch % 3) == 0 && epoch != 0) { // Check if one of the variants on the S value is better than the current one if(::max(accuracy_epochs[epoch-2], accuracy_epochs[epoch-1]) > accuracy_epochs[epoch-3]){ // The accuracy has been better when adjusting down or up // Lets figure out which way to adjust, lets check if the optimal was to adjust up if(accuracy_epochs[epoch-2] > accuracy_epochs[epoch-1]) { s -= this->delta_s; printf("Adjusting the S value to %f \n", s); } // Assuming that moving the S in positive direction is better. Or that they are equal, in that case, increase. 
else{ s += this->delta_s; printf("Adjusting the S value to %f \n", s); } // Set the tempromary s value to the new s value s_tempromary = s; } } // Check if we are on the epoch to decrement the S value else if((epoch % 3) == 1) { s_tempromary = s - this->delta_s; if(feedback == true) { printf("Current S value %f \n", s); printf("Attempting S value %f \n", s_tempromary); } } // Check if we are on the epoch to increment the S value else if((epoch % 3) == 2){ s_tempromary = s + this->delta_s; if(feedback == true) { printf("Current S value %f \n", s); printf("Attempting S value %f \n", s_tempromary); } } } // Start the epoch timer auto start = chrono::high_resolution_clock::now(); // Start all the worker threads for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create a thread for each of the classes that will train and pass the parameters worker_threads[class_id] = std::thread( &GPUKernel::train_class_one_epoch, class_id, this->enabled_gpus[class_id % this->enabled_gpus.size()], batches, threshold, s_tempromary, this->model, this->x_train, this->y_train, this->samples_train_n, this->classes_amount, this->clauses_amount, this->features_amount, this->automatas_amount, this->states_amount, training_times, random_generator ); } // Wait for all the threads to finish for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create a thread for each of the classes that will train and pass the parameters worker_threads[class_id].join(); } // Stop the timer auto stop = chrono::high_resolution_clock::now(); // Calculate the time used in seconds double time_used = chrono::duration_cast<chrono::nanoseconds>(stop - start).count() / 1000000000.0; // Check if we are to print the time for each class if(feedback) { printf("\nTraining time for classes: \n"); for(unsigned int class_id = 0; class_id < classes_amount; class_id++) { printf("\t- Class %d: %f seconds\n", class_id, (training_times[class_id]/1000)); } printf("\n"); } // Check if we are to validate our model against the loaded validation data if(validation == true) { // If validation is turned on validate(feedback); // Print to file result_stream << epoch << ";" << accuracy_epochs[epoch] << ";" << s << ";" <<s_tempromary << ";" << time_used << "\n"; } if(print_model_after_epoch == true) { print_model(); } } // Some cleanup after the training is done delete [] worker_threads; delete [] training_times; } double GPUKernel::validate(bool feedback) { // Create some variables double accuracy {0.0}; unsigned int correct_guesses {0}; unsigned int wrong_guesses {0}; unsigned int* correct_guesses_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* wrong_guesses_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* total_predicted_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* total_samples_for_class = new unsigned int[this->classes_amount] {0}; unsigned int correct_class; unsigned int temp_highest_class; int temp_highest_score; // GPU Arrays int* scores; hipMallocManaged(&scores, sizeof(int) * this->classes_amount * this->samples_val_n, hipMemAttachGlobal); hipMemset(&scores, 0, sizeof(int) * this->classes_amount * this->samples_val_n); // Create an array that holds the threads that will validate each of the samples std::thread* worker_threads = new std::thread[this->classes_amount]; // Start the validation for each class for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create and start the thread worker_threads[class_id] 
= std::thread( &GPUKernel::validate_class, class_id, this->enabled_gpus[class_id % this->enabled_gpus.size()], this->model, this->x_val, this->y_val, scores, this->samples_val_n, this->classes_amount, this->clauses_amount, this->features_amount, this->automatas_amount, this->states_amount ); } // Wait for the threads to finish for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Join the thread worker_threads[class_id].join(); } // Calculate the output of for each sample for(unsigned int sample_id = 0; sample_id < this->samples_val_n; sample_id++) { // Just assign class 0 as the leading class temp_highest_class = 0; temp_highest_score = scores[(sample_id * this->classes_amount)]; correct_class = this->y_val[sample_id]; // Get the class with the most votes for(unsigned int class_id = 1; class_id < this->classes_amount; class_id++) { // Check if the current class has better score than previous if(temp_highest_score < scores[(sample_id * this->classes_amount) + class_id]) { temp_highest_score = scores[(sample_id * this->classes_amount) + class_id]; temp_highest_class = class_id; } } // Check if we were correct if(temp_highest_class == correct_class) { // Store the correct guess to results correct_guesses += 1; correct_guesses_for_class[temp_highest_class] += 1; } else{ // Store the wrong guess to results wrong_guesses += 1; wrong_guesses_for_class[temp_highest_class] += 1; } // Store how many times the class was predicted total_predicted_for_class[temp_highest_class] += 1; // Add how many guesses for that class that exists total_samples_for_class[correct_class] += 1; // printf("Guessed: %d, correct: %d, score: %d \n", temp_highest_class, correct_class, scores[(sample_id * this->classes_amount) + temp_highest_class]); } // Calculate the accuracy accuracy = (1.0 * correct_guesses) / (correct_guesses + wrong_guesses); // Check if we should print the results to console if(feedback == true) { // Print some info printf("Results from validation \n"); printf("Total samples: %d \n", this->samples_val_n); printf("Model accuracy: %f \n", accuracy); printf("Correct guesses: %d \n", correct_guesses); printf("Wrong guesses: %d \n", wrong_guesses); for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { printf("\n================ \n"); printf("Class: %d \n", class_id); if(total_predicted_for_class[class_id] != 0) { printf("Precission: %f \n", (1.0 * correct_guesses_for_class[class_id]) / (total_predicted_for_class[class_id])); } else { printf("Precission: N/A \n"); } if(total_samples_for_class[class_id] != 0) { printf("Recall: %f \n", (1.0 * correct_guesses_for_class[class_id]) / (total_samples_for_class[class_id])); } else { printf("Recall: N/A \n"); } printf("Samples: %d \n", total_samples_for_class[class_id]); printf("Correct guesses: %d \n", correct_guesses_for_class[class_id]); printf("Wrong guesses: %d \n", wrong_guesses_for_class[class_id]); } printf("\n\n"); } // Some cleanup after the validation is done delete [] correct_guesses_for_class; delete [] wrong_guesses_for_class; delete [] total_predicted_for_class; delete [] total_samples_for_class; delete [] worker_threads; hipFree(&scores); return accuracy; } void GPUKernel::train_class_one_epoch(unsigned int class_id, unsigned int gpu_id, unsigned int batches, unsigned int threshold, float s, unsigned int* model, unsigned int* x_data, unsigned int* y_data, unsigned int samples, unsigned int classes_amount, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, 
unsigned int states_amount, float* training_times, TsetlinRandomWheel* random_generator) { // Attempt to select the given GPU if(hipSetDevice(gpu_id) != hipSuccess) { printf("train_class_one_epoch(): Unable to switch to gpu for Class: %d, GPU: %d \n", class_id, gpu_id); return; } // Create a stream for the class hipStream_t class_stream; switch(hipStreamCreateWithFlags(&class_stream, hipStreamDefault)) { case hipSuccess: break; case hipErrorInvalidValue: printf("train_class_one(): An invalid value was passed to hipStreamCreateWithFlags on Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("train_class_one(): An unknown CUDA error occured on hipStreamCreateWithFlags with Class %d, on GPU %d, code: %d \n", class_id, gpu_id, hipGetLastError()); } // Allocate some memory that will be used during training bool* clauses_output; hipMalloc(&clauses_output, sizeof(bool) * clauses_amount); int* score; hipMalloc(&score, sizeof(int)); unsigned int* clauses_feedback; hipMallocManaged(&clauses_feedback, sizeof(unsigned int) * clauses_amount); hiprandState_t* random_states; hipMalloc(&random_states, sizeof(hiprandState_t) * clauses_amount * automatas_amount); // Declare some training specific variables bool correct_class; // Calculate the launch parameters for each kernel dim3 blocks = GPUKernel::calculate_blocks_per_kernel(clauses_amount); dim3 threads = GPUKernel::calculate_threads_per_block(automatas_amount); unsigned int automatas_total = clauses_amount * automatas_amount; unsigned int reduce_votes_blocks = 1; // Due to the nature of the kernel, anyway its not like there are millions of clauses unsigned int reduce_votes_threads = ((clauses_amount / 32) + 1) * 32; unsigned int calculate_feedback_blocks = 1; // This value will stay at one, unless we need more blocks unsigned int calculate_feedback_threads = ((clauses_amount / 32) + 1) * 32; if(calculate_feedback_threads > 1024) { // Update the amount of blocks that are required calculate_feedback_blocks = ((clauses_amount / calculate_feedback_threads)) + 1; // Set the amount of threads to be maximum calculate_feedback_threads = 1024; } // Check if we are above max threads per block if(reduce_votes_threads > 1024) { reduce_votes_threads = 1024; } // Initialize the random values hipLaunchKernelGGL(( initialize_random_states), dim3(blocks), dim3(threads), 0, class_stream, random_states, rand(), clauses_amount * automatas_amount); // Create two events that will be used to measure total epoch training time hipEvent_t start, stop; // Create the events hipEventCreate(&start); hipEventCreate(&stop); // Start the time hipEventRecord(start); // Start looping over the batches for(unsigned int batch_id = 0; batch_id < batches; batch_id++) { // Start looping over all of the samples for(unsigned int sample_id = 0; sample_id < samples; sample_id++) { // Check if the current class is the target class for this sample correct_class = (class_id == y_data[sample_id]); // Check if we are to train the sample on the current class or not if(correct_class || (random_generator->get_random_float(class_id)) < (1.0f / (1.0f * classes_amount))) { // Evaluate the clause output hipLaunchKernelGGL(( validate_clauses), dim3((automatas_total / 32) + 1), dim3(32), 0, class_stream, model, clauses_output, x_data, sample_id, clauses_amount, features_amount, automatas_amount, class_id, states_amount, false ); // Count the votes from the evaluation phase hipLaunchKernelGGL(( reduce_votes), dim3(reduce_votes_blocks), dim3(reduce_votes_threads), sizeof(int) *( 
reduce_votes)_threads, class_stream, score, 0, clauses_output, clauses_amount, threshold ); // Calculate the feedback to each clause hipLaunchKernelGGL(( calculate_feedback), dim3(calculate_feedback_blocks), dim3(calculate_feedback_threads), 0, class_stream, clauses_feedback, score, threshold, s, class_id, correct_class, clauses_amount, random_states ); // Perform feedback on the model hipLaunchKernelGGL(( give_feedback_to_clauses), dim3((automatas_total / 32) + 1), dim3(32), 0, class_stream, model, clauses_feedback, x_data, clauses_output, class_id, sample_id, correct_class, clauses_amount, features_amount, automatas_amount, states_amount, threshold, s, random_states ); } } } // Set a stop timer hipEventRecord(stop); // After launching all the kernels, try to wait for them to complete switch(hipStreamSynchronize(class_stream)) { case hipSuccess: break; case hipErrorInvalidResourceHandle: printf("train_class_one_epoch(): Unable to wait for stream before getting the score for Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("train_class_one_epoch(): An unknown CUDA error occured on hipStreamSynchronize with Class %d, on GPU %d \n", class_id, gpu_id); } // Stop the time hipEventSynchronize(stop); // Calculate the time difference hipEventElapsedTime(&training_times[class_id], start, stop); // Free up space that was used during training hipFree(clauses_output); hipFree(score); hipFree(clauses_feedback); hipFree(random_states); // Attempt to destroy the stream if(hipStreamDestroy(class_stream) != hipSuccess) { printf("train_class_one_epoch(): Unable to destroy stream for Class %d, on GPU %d \n", class_id, gpu_id); } } void GPUKernel::validate_class(unsigned int class_id, unsigned int gpu_id, unsigned int* model, unsigned int* x_val, unsigned int* y_val, int* scores, unsigned int samples_amount, unsigned int classes_amount, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state) { // Select the given GPU if(hipSetDevice(gpu_id) != hipSuccess) { printf("validate_class(): Unable to select GPU for Class: %d, GPU: %d \n", class_id, gpu_id); return; } // Create a stream for the thread hipStream_t class_stream; switch(hipStreamCreateWithFlags(&class_stream, hipStreamDefault)) { case hipSuccess: break; case hipErrorInvalidValue: printf("validate_class(): An invalid value was passed to hipStreamCreateWithFlags on Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("validate_class(): An unknown CUDA error occured on hipStreamCreateWithFlags with Class %d, on GPU %d \n", class_id, gpu_id); } // Allocate memory for the clause outputs bool* clauses_output; hipMalloc(&clauses_output, sizeof(bool) * clauses_amount); // Calculate some launch parameters dim3 blocks = GPUKernel::calculate_blocks_per_kernel(clauses_amount); dim3 threads = GPUKernel::calculate_threads_per_block(automatas_amount); // Start the validation of the samples for(unsigned int sample_id = 0; sample_id < samples_amount; sample_id++) { // Validate the sample hipLaunchKernelGGL(( validate_clauses), dim3(blocks), dim3(threads), 0, class_stream, model, clauses_output, x_val, sample_id, clauses_amount, features_amount, automatas_amount, class_id, max_state, true ); // Count the votes hipLaunchKernelGGL(( reduce_votes), dim3(1), dim3(128), sizeof(int) * 128, class_stream, scores, ((sample_id * classes_amount) + class_id), clauses_output, clauses_amount, 0 ); } // Wait for the stream to finish switch(hipStreamSynchronize(class_stream)) { case hipSuccess: 
break; case hipErrorInvalidResourceHandle: printf("validate_class(): Unable to wait for stream before getting the score for Class %d, on GPU %d", class_id, gpu_id); break; default: printf("validate_class(): An unknown CUDA error occured on hipStreamSynchronize with Class %d, on GPU %d", class_id, gpu_id); } // Cleanup used memory hipFree(clauses_output); // Destroy the stream if(hipStreamDestroy(class_stream) != hipSuccess) { printf("validate_class(): Unable to destroy the stream for Class: %d, GPU %d \n", class_id, gpu_id); return; } } void GPUKernel::select_gpu(unsigned int gpu_id) { // Attempt to switch to the given GPU hipError_t code = hipSetDevice(gpu_id); if(code == hipErrorDeviceAlreadyInUse) { printf("select_gpu(): Could not switch to the GPU with an ID of: %u, because it is already in use \n", gpu_id); } else if(code == hipErrorInvalidDevice) { printf("select_gpu(): Could not switch to the GPU with an ID of: %u, because the GPU id does not exist in the CUDA driver\n", gpu_id); } } void GPUKernel::print_model() { printf("Model: \n"); for(unsigned int class_id = 0; class_id < this->classes_amount; class_id ++) { printf("Class: %d \n", class_id); for(unsigned int clause_id = 0; clause_id < this->clauses_amount; clause_id ++) { printf(" %d: ", clause_id); for(unsigned int automata_id = 0; automata_id < this->automatas_amount; automata_id ++) { printf("%d ", this->model[(class_id * this->clauses_amount * this->automatas_amount) + (clause_id * this->automatas_amount) + automata_id]); } printf("\n"); } printf("\n"); } printf("\n"); } dim3 GPUKernel::calculate_blocks_per_kernel(unsigned int clauses_amount) { return dim3(clauses_amount); } dim3 GPUKernel::calculate_threads_per_block(unsigned int automatas_amount) { if(automatas_amount > 1024) { automatas_amount = 1024; } else if(automatas_amount < 32) { automatas_amount = 32; } return dim3(automatas_amount); } GPUKernel::~GPUKernel() { // Free up memory from the devices if(this->model != nullptr) { hipFree(&this->model); } if(this->x_train != nullptr) { hipFree(&this->x_train); } if(this->x_val != nullptr) { hipFree(&this->x_val); } }
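As a small aside on the launch arithmetic in train_class_one_epoch above: reduce_votes and calculate_feedback size their thread counts with ((clauses_amount / 32) + 1) * 32, i.e. the clause count rounded up to a whole number of warps, and reduce_votes requests sizeof(int) bytes of dynamic shared memory per thread. The snippet below only evaluates that expression for a few hypothetical clause counts; it does not change the kernel code in any way.

// Worked example of the warp-rounding used for the reduce_votes / calculate_feedback
// launches above. The clause counts are hypothetical.
#include <cstdio>

int main() {
    unsigned int examples[] = {100, 128, 500};
    for (unsigned int clauses : examples) {
        // Same expression as in train_class_one_epoch: round up to a multiple of 32.
        unsigned int threads = ((clauses / 32) + 1) * 32;
        // reduce_votes sizes its dynamic shared memory from the same count.
        std::printf("clauses=%u -> %u threads, %zu bytes of shared memory\n",
                    clauses, threads, sizeof(int) * (size_t)threads);
        // e.g. 100 -> 128 threads, 128 -> 160 threads, 500 -> 512 threads
    }
    return 0;
}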
64c76b6d6058ab0d4ac9834a7dcc59165dc6d684.cu
#include <stdio.h> #include <iostream> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> #include <iostream> #include <vector> #include <thread> #include "gpu_kernel.cuh" #include "tsetlin_random_wheel.cuh" using namespace std; GPUKernel::GPUKernel(){ int available_gpus; // Query for amount of available GPUs if(cudaGetDeviceCount(&available_gpus) != cudaSuccess) { printf("KernelError: Unable to query for the amount of GPUs available! Check that the CUDA driver is properly installed.\n"); } if(available_gpus == 0) { printf("KernelError: The CUDA driver could not find any compatible GPUs.\n"); } }; void GPUKernel::enable_ssl_s(double delta_s) { // Store the delta for s this->delta_s = delta_s; // Enable the SSL for s flag this->ssl_s_enabled = true; } void GPUKernel::enable_gpu(unsigned int gpu_id) { // Try to select the correct GPU this->select_gpu(gpu_id); // Create a new configuration struct this->enabled_gpus.push_back(gpu_id); } void GPUKernel::remove_gpu(unsigned int gpu_id) { for(unsigned int index = 0; index < this->enabled_gpus.size(); index++) { if(this->enabled_gpus[index] == gpu_id) { this->enabled_gpus.erase(this->enabled_gpus.begin() + (index - 1)); break; } } } void GPUKernel::load_model(const unsigned int* model, unsigned int classes, unsigned int clauses, unsigned int automatas, unsigned int states) { // Store the configuration to the kernel class this->classes_amount = classes; this->clauses_amount = clauses; this->automatas_amount = automatas; this->features_amount = static_cast<unsigned int>(automatas / 2); this->states_amount = states; // Check if an model has already been loaded, if so, deallocate its memory if(this->model != nullptr) { cudaFree(&this->model); this->model = nullptr; } // Attempt to allocate unified memory for the model if(cudaMallocManaged(&this->model, sizeof(unsigned int) * classes * clauses * automatas, cudaMemAttachGlobal) != cudaSuccess) { printf("load_model(): Unable to allocate unified memory for the model\n"); return; } // Copy the data to GPU memory cudaMemcpy(this->model, model, sizeof(unsigned int) * classes * clauses * automatas, cudaMemcpyHostToHost); } void GPUKernel::load_training_data(const unsigned int* x_train, const unsigned int* y_train, unsigned int train_samples_n) { // First cleanup if previous training data has been loaded if(this->x_train != nullptr) { cudaFree(&this->x_train); this->x_train = nullptr; } if(this->y_train != nullptr) { delete [] this->y_train; this->y_train = nullptr; } // Attempt to allocate unified memory for the x_data and y_data if(cudaMallocManaged(&this->x_train, sizeof(unsigned int) * train_samples_n * features_amount, cudaMemAttachGlobal) != cudaSuccess) { printf("load_training_data(): Unable to allocate unified memory for the x_train data\n"); return; } // Allocate memory for y_train this->y_train = new unsigned int[train_samples_n]; // Copy the x train data to GPU memory cudaMemcpy(this->x_train, x_train, sizeof(unsigned int) * train_samples_n * features_amount, cudaMemcpyHostToHost); // Copy the y train data to Host memory memcpy(this->y_train, y_train, sizeof(unsigned int) * train_samples_n); // Store how many samples that have been loaded this->samples_train_n = train_samples_n; } void GPUKernel::load_validation_data(const unsigned int* x_val, const unsigned int* y_val, unsigned int val_samples_n) { // First cleanup if previous training data has been loaded if(this->x_val != nullptr) { cudaFree(&this->x_val); this->x_val = nullptr; } if(this->y_val != nullptr) { delete [] this->y_val; 
this->y_val = nullptr; } // Attempt to allocate unified memory for the x_data and y_data if(cudaMallocManaged(&this->x_val, sizeof(unsigned int) * val_samples_n * this->features_amount, cudaMemAttachGlobal) != cudaSuccess) { printf("load_validation_data(): Unable to allocate unified memory for the x_val data\n"); return; } // Allocate memory for y_train this->y_val = new unsigned int[val_samples_n]; // Copy the x val data to GPU memory cudaMemcpy(this->x_val, x_val, sizeof(unsigned int) * val_samples_n * features_amount, cudaMemcpyHostToHost); // Copy the y val data to Host memory memcpy(this->y_val, y_val, sizeof(unsigned int) * val_samples_n); // Store how many samples that have been loaded this->samples_val_n = val_samples_n; } void GPUKernel::fit(int epochs, int batches, bool validation, int threshold, float s, bool feedback, bool print_model_after_epoch) { // Check if we have enabled any GPUs if(this->enabled_gpus.size() == 0) { printf("fit(): No GPUs has been enabled. Please enable some gpu's before trying to fit the data\n"); return; } // Declare an array for the worker threads std::thread* worker_threads = new std::thread[this->classes_amount]; // Create a new random generator TsetlinRandomWheel* random_generator = new TsetlinRandomWheel(rand(), this->classes_amount, 65565); // Create time objects float* training_times = new float[this->classes_amount]; // Create an array that will hold the accuracy for each epochs double* accuracy_epochs = (double*) malloc(sizeof(double) * epochs); float s_tempromary = s; // Declare filestream for writing the results std::ofstream result_stream; // If feedback, open a result file if(feedback == true) { // Open the filestream result_stream.open("training_result.csv"); // Write the header of the file result_stream << "epoch;accuracy;s;s_temp;time\n"; } // Start looping the epochs for(int epoch = 1; epoch <= epochs; epoch++) { // Print feedback if(feedback == true){ printf("Epoch %d \n", epoch); } // Check if we are using ssl on the S value if(validation == true && this->ssl_s_enabled == true) { // Check if we are on a epoch that is divisble by 3 (then we are to perform a calculation for the new S value) if((epoch % 3) == 0 && epoch != 0) { // Check if one of the variants on the S value is better than the current one if(std::max(accuracy_epochs[epoch-2], accuracy_epochs[epoch-1]) > accuracy_epochs[epoch-3]){ // The accuracy has been better when adjusting down or up // Lets figure out which way to adjust, lets check if the optimal was to adjust up if(accuracy_epochs[epoch-2] > accuracy_epochs[epoch-1]) { s -= this->delta_s; printf("Adjusting the S value to %f \n", s); } // Assuming that moving the S in positive direction is better. Or that they are equal, in that case, increase. 
else{ s += this->delta_s; printf("Adjusting the S value to %f \n", s); } // Set the tempromary s value to the new s value s_tempromary = s; } } // Check if we are on the epoch to decrement the S value else if((epoch % 3) == 1) { s_tempromary = s - this->delta_s; if(feedback == true) { printf("Current S value %f \n", s); printf("Attempting S value %f \n", s_tempromary); } } // Check if we are on the epoch to increment the S value else if((epoch % 3) == 2){ s_tempromary = s + this->delta_s; if(feedback == true) { printf("Current S value %f \n", s); printf("Attempting S value %f \n", s_tempromary); } } } // Start the epoch timer auto start = chrono::high_resolution_clock::now(); // Start all the worker threads for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create a thread for each of the classes that will train and pass the parameters worker_threads[class_id] = std::thread( &GPUKernel::train_class_one_epoch, class_id, this->enabled_gpus[class_id % this->enabled_gpus.size()], batches, threshold, s_tempromary, this->model, this->x_train, this->y_train, this->samples_train_n, this->classes_amount, this->clauses_amount, this->features_amount, this->automatas_amount, this->states_amount, training_times, random_generator ); } // Wait for all the threads to finish for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create a thread for each of the classes that will train and pass the parameters worker_threads[class_id].join(); } // Stop the timer auto stop = chrono::high_resolution_clock::now(); // Calculate the time used in seconds double time_used = chrono::duration_cast<chrono::nanoseconds>(stop - start).count() / 1000000000.0; // Check if we are to print the time for each class if(feedback) { printf("\nTraining time for classes: \n"); for(unsigned int class_id = 0; class_id < classes_amount; class_id++) { printf("\t- Class %d: %f seconds\n", class_id, (training_times[class_id]/1000)); } printf("\n"); } // Check if we are to validate our model against the loaded validation data if(validation == true) { // If validation is turned on validate(feedback); // Print to file result_stream << epoch << ";" << accuracy_epochs[epoch] << ";" << s << ";" <<s_tempromary << ";" << time_used << "\n"; } if(print_model_after_epoch == true) { print_model(); } } // Some cleanup after the training is done delete [] worker_threads; delete [] training_times; } double GPUKernel::validate(bool feedback) { // Create some variables double accuracy {0.0}; unsigned int correct_guesses {0}; unsigned int wrong_guesses {0}; unsigned int* correct_guesses_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* wrong_guesses_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* total_predicted_for_class = new unsigned int[this->classes_amount] {0}; unsigned int* total_samples_for_class = new unsigned int[this->classes_amount] {0}; unsigned int correct_class; unsigned int temp_highest_class; int temp_highest_score; // GPU Arrays int* scores; cudaMallocManaged(&scores, sizeof(int) * this->classes_amount * this->samples_val_n, cudaMemAttachGlobal); cudaMemset(&scores, 0, sizeof(int) * this->classes_amount * this->samples_val_n); // Create an array that holds the threads that will validate each of the samples std::thread* worker_threads = new std::thread[this->classes_amount]; // Start the validation for each class for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Create and start the thread 
worker_threads[class_id] = std::thread( &GPUKernel::validate_class, class_id, this->enabled_gpus[class_id % this->enabled_gpus.size()], this->model, this->x_val, this->y_val, scores, this->samples_val_n, this->classes_amount, this->clauses_amount, this->features_amount, this->automatas_amount, this->states_amount ); } // Wait for the threads to finish for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { // Join the thread worker_threads[class_id].join(); } // Calculate the output of for each sample for(unsigned int sample_id = 0; sample_id < this->samples_val_n; sample_id++) { // Just assign class 0 as the leading class temp_highest_class = 0; temp_highest_score = scores[(sample_id * this->classes_amount)]; correct_class = this->y_val[sample_id]; // Get the class with the most votes for(unsigned int class_id = 1; class_id < this->classes_amount; class_id++) { // Check if the current class has better score than previous if(temp_highest_score < scores[(sample_id * this->classes_amount) + class_id]) { temp_highest_score = scores[(sample_id * this->classes_amount) + class_id]; temp_highest_class = class_id; } } // Check if we were correct if(temp_highest_class == correct_class) { // Store the correct guess to results correct_guesses += 1; correct_guesses_for_class[temp_highest_class] += 1; } else{ // Store the wrong guess to results wrong_guesses += 1; wrong_guesses_for_class[temp_highest_class] += 1; } // Store how many times the class was predicted total_predicted_for_class[temp_highest_class] += 1; // Add how many guesses for that class that exists total_samples_for_class[correct_class] += 1; // printf("Guessed: %d, correct: %d, score: %d \n", temp_highest_class, correct_class, scores[(sample_id * this->classes_amount) + temp_highest_class]); } // Calculate the accuracy accuracy = (1.0 * correct_guesses) / (correct_guesses + wrong_guesses); // Check if we should print the results to console if(feedback == true) { // Print some info printf("Results from validation \n"); printf("Total samples: %d \n", this->samples_val_n); printf("Model accuracy: %f \n", accuracy); printf("Correct guesses: %d \n", correct_guesses); printf("Wrong guesses: %d \n", wrong_guesses); for(unsigned int class_id = 0; class_id < this->classes_amount; class_id++) { printf("\n================ \n"); printf("Class: %d \n", class_id); if(total_predicted_for_class[class_id] != 0) { printf("Precission: %f \n", (1.0 * correct_guesses_for_class[class_id]) / (total_predicted_for_class[class_id])); } else { printf("Precission: N/A \n"); } if(total_samples_for_class[class_id] != 0) { printf("Recall: %f \n", (1.0 * correct_guesses_for_class[class_id]) / (total_samples_for_class[class_id])); } else { printf("Recall: N/A \n"); } printf("Samples: %d \n", total_samples_for_class[class_id]); printf("Correct guesses: %d \n", correct_guesses_for_class[class_id]); printf("Wrong guesses: %d \n", wrong_guesses_for_class[class_id]); } printf("\n\n"); } // Some cleanup after the validation is done delete [] correct_guesses_for_class; delete [] wrong_guesses_for_class; delete [] total_predicted_for_class; delete [] total_samples_for_class; delete [] worker_threads; cudaFree(&scores); return accuracy; } void GPUKernel::train_class_one_epoch(unsigned int class_id, unsigned int gpu_id, unsigned int batches, unsigned int threshold, float s, unsigned int* model, unsigned int* x_data, unsigned int* y_data, unsigned int samples, unsigned int classes_amount, unsigned int clauses_amount, unsigned int features_amount, unsigned 
int automatas_amount, unsigned int states_amount, float* training_times, TsetlinRandomWheel* random_generator) { // Attempt to select the given GPU if(cudaSetDevice(gpu_id) != cudaSuccess) { printf("train_class_one_epoch(): Unable to switch to gpu for Class: %d, GPU: %d \n", class_id, gpu_id); return; } // Create a stream for the class cudaStream_t class_stream; switch(cudaStreamCreateWithFlags(&class_stream, cudaStreamDefault)) { case cudaSuccess: break; case cudaErrorInvalidValue: printf("train_class_one(): An invalid value was passed to cudaStreamCreateWithFlags on Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("train_class_one(): An unknown CUDA error occured on cudaStreamCreateWithFlags with Class %d, on GPU %d, code: %d \n", class_id, gpu_id, cudaGetLastError()); } // Allocate some memory that will be used during training bool* clauses_output; cudaMalloc(&clauses_output, sizeof(bool) * clauses_amount); int* score; cudaMalloc(&score, sizeof(int)); unsigned int* clauses_feedback; cudaMallocManaged(&clauses_feedback, sizeof(unsigned int) * clauses_amount); curandState* random_states; cudaMalloc(&random_states, sizeof(curandState) * clauses_amount * automatas_amount); // Declare some training specific variables bool correct_class; // Calculate the launch parameters for each kernel dim3 blocks = GPUKernel::calculate_blocks_per_kernel(clauses_amount); dim3 threads = GPUKernel::calculate_threads_per_block(automatas_amount); unsigned int automatas_total = clauses_amount * automatas_amount; unsigned int reduce_votes_blocks = 1; // Due to the nature of the kernel, anyway its not like there are millions of clauses unsigned int reduce_votes_threads = ((clauses_amount / 32) + 1) * 32; unsigned int calculate_feedback_blocks = 1; // This value will stay at one, unless we need more blocks unsigned int calculate_feedback_threads = ((clauses_amount / 32) + 1) * 32; if(calculate_feedback_threads > 1024) { // Update the amount of blocks that are required calculate_feedback_blocks = ((clauses_amount / calculate_feedback_threads)) + 1; // Set the amount of threads to be maximum calculate_feedback_threads = 1024; } // Check if we are above max threads per block if(reduce_votes_threads > 1024) { reduce_votes_threads = 1024; } // Initialize the random values initialize_random_states<<<blocks, threads, 0, class_stream>>>(random_states, rand(), clauses_amount * automatas_amount); // Create two events that will be used to measure total epoch training time cudaEvent_t start, stop; // Create the events cudaEventCreate(&start); cudaEventCreate(&stop); // Start the time cudaEventRecord(start); // Start looping over the batches for(unsigned int batch_id = 0; batch_id < batches; batch_id++) { // Start looping over all of the samples for(unsigned int sample_id = 0; sample_id < samples; sample_id++) { // Check if the current class is the target class for this sample correct_class = (class_id == y_data[sample_id]); // Check if we are to train the sample on the current class or not if(correct_class || (random_generator->get_random_float(class_id)) < (1.0f / (1.0f * classes_amount))) { // Evaluate the clause output validate_clauses<<<(automatas_total / 32) + 1, 32, 0, class_stream>>>( model, clauses_output, x_data, sample_id, clauses_amount, features_amount, automatas_amount, class_id, states_amount, false ); // Count the votes from the evaluation phase reduce_votes<<<reduce_votes_blocks, reduce_votes_threads, sizeof(int) * reduce_votes_threads, class_stream>>>( score, 0, clauses_output, 
clauses_amount, threshold ); // Calculate the feedback to each clause calculate_feedback<<<calculate_feedback_blocks, calculate_feedback_threads, 0, class_stream>>>( clauses_feedback, score, threshold, s, class_id, correct_class, clauses_amount, random_states ); // Perform feedback on the model give_feedback_to_clauses<<<(automatas_total / 32) + 1, 32, 0, class_stream>>>( model, clauses_feedback, x_data, clauses_output, class_id, sample_id, correct_class, clauses_amount, features_amount, automatas_amount, states_amount, threshold, s, random_states ); } } } // Set a stop timer cudaEventRecord(stop); // After launching all the kernels, try to wait for them to complete switch(cudaStreamSynchronize(class_stream)) { case cudaSuccess: break; case cudaErrorInvalidResourceHandle: printf("train_class_one_epoch(): Unable to wait for stream before getting the score for Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("train_class_one_epoch(): An unknown CUDA error occured on cudaStreamSynchronize with Class %d, on GPU %d \n", class_id, gpu_id); } // Stop the time cudaEventSynchronize(stop); // Calculate the time difference cudaEventElapsedTime(&training_times[class_id], start, stop); // Free up space that was used during training cudaFree(clauses_output); cudaFree(score); cudaFree(clauses_feedback); cudaFree(random_states); // Attempt to destroy the stream if(cudaStreamDestroy(class_stream) != cudaSuccess) { printf("train_class_one_epoch(): Unable to destroy stream for Class %d, on GPU %d \n", class_id, gpu_id); } } void GPUKernel::validate_class(unsigned int class_id, unsigned int gpu_id, unsigned int* model, unsigned int* x_val, unsigned int* y_val, int* scores, unsigned int samples_amount, unsigned int classes_amount, unsigned int clauses_amount, unsigned int features_amount, unsigned int automatas_amount, unsigned int max_state) { // Select the given GPU if(cudaSetDevice(gpu_id) != cudaSuccess) { printf("validate_class(): Unable to select GPU for Class: %d, GPU: %d \n", class_id, gpu_id); return; } // Create a stream for the thread cudaStream_t class_stream; switch(cudaStreamCreateWithFlags(&class_stream, cudaStreamDefault)) { case cudaSuccess: break; case cudaErrorInvalidValue: printf("validate_class(): An invalid value was passed to cudaStreamCreateWithFlags on Class %d, on GPU %d \n", class_id, gpu_id); break; default: printf("validate_class(): An unknown CUDA error occured on cudaStreamCreateWithFlags with Class %d, on GPU %d \n", class_id, gpu_id); } // Allocate memory for the clause outputs bool* clauses_output; cudaMalloc(&clauses_output, sizeof(bool) * clauses_amount); // Calculate some launch parameters dim3 blocks = GPUKernel::calculate_blocks_per_kernel(clauses_amount); dim3 threads = GPUKernel::calculate_threads_per_block(automatas_amount); // Start the validation of the samples for(unsigned int sample_id = 0; sample_id < samples_amount; sample_id++) { // Validate the sample validate_clauses<<<blocks, threads, 0, class_stream>>>( model, clauses_output, x_val, sample_id, clauses_amount, features_amount, automatas_amount, class_id, max_state, true ); // Count the votes reduce_votes<<<1, 128, sizeof(int) * 128, class_stream>>>( scores, ((sample_id * classes_amount) + class_id), clauses_output, clauses_amount, 0 ); } // Wait for the stream to finish switch(cudaStreamSynchronize(class_stream)) { case cudaSuccess: break; case cudaErrorInvalidResourceHandle: printf("validate_class(): Unable to wait for stream before getting the score for Class %d, on GPU %d", class_id, 
gpu_id); break; default: printf("validate_class(): An unknown CUDA error occured on cudaStreamSynchronize with Class %d, on GPU %d", class_id, gpu_id); } // Cleanup used memory cudaFree(clauses_output); // Destroy the stream if(cudaStreamDestroy(class_stream) != cudaSuccess) { printf("validate_class(): Unable to destroy the stream for Class: %d, GPU %d \n", class_id, gpu_id); return; } } void GPUKernel::select_gpu(unsigned int gpu_id) { // Attempt to switch to the given GPU cudaError code = cudaSetDevice(gpu_id); if(code == cudaErrorDeviceAlreadyInUse) { printf("select_gpu(): Could not switch to the GPU with an ID of: %u, because it is already in use \n", gpu_id); } else if(code == cudaErrorInvalidDevice) { printf("select_gpu(): Could not switch to the GPU with an ID of: %u, because the GPU id does not exist in the CUDA driver\n", gpu_id); } } void GPUKernel::print_model() { printf("Model: \n"); for(unsigned int class_id = 0; class_id < this->classes_amount; class_id ++) { printf("Class: %d \n", class_id); for(unsigned int clause_id = 0; clause_id < this->clauses_amount; clause_id ++) { printf(" %d: ", clause_id); for(unsigned int automata_id = 0; automata_id < this->automatas_amount; automata_id ++) { printf("%d ", this->model[(class_id * this->clauses_amount * this->automatas_amount) + (clause_id * this->automatas_amount) + automata_id]); } printf("\n"); } printf("\n"); } printf("\n"); } dim3 GPUKernel::calculate_blocks_per_kernel(unsigned int clauses_amount) { return dim3(clauses_amount); } dim3 GPUKernel::calculate_threads_per_block(unsigned int automatas_amount) { if(automatas_amount > 1024) { automatas_amount = 1024; } else if(automatas_amount < 32) { automatas_amount = 32; } return dim3(automatas_amount); } GPUKernel::~GPUKernel() { // Free up memory from the devices if(this->model != nullptr) { cudaFree(&this->model); } if(this->x_train != nullptr) { cudaFree(&this->x_train); } if(this->x_val != nullptr) { cudaFree(&this->x_val); } }
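For orientation, here is a hypothetical driver showing the call order the GPUKernel interface above implies: enable at least one GPU, load a model laid out as classes x clauses x automatas unsigned ints, load training and validation data, then call fit. Every size and hyperparameter below is a placeholder, not a value taken from the original project.

// Hypothetical usage sketch for the GPUKernel class defined above.
// All sizes, counts and hyperparameters are illustrative assumptions.
#include <vector>
#include "gpu_kernel.cuh"

int main() {
    const unsigned int classes = 10, clauses = 100, automatas = 2 * 784, states = 100;
    const unsigned int train_n = 60000, val_n = 10000;
    const unsigned int features = automatas / 2;

    // Model and data buffers; a real run would fill these with meaningful values.
    std::vector<unsigned int> model(classes * clauses * automatas, states / 2);
    std::vector<unsigned int> x_train(train_n * features, 0), y_train(train_n, 0);
    std::vector<unsigned int> x_val(val_n * features, 0), y_val(val_n, 0);

    GPUKernel kernel;
    kernel.enable_gpu(0);                                              // use GPU 0
    kernel.load_model(model.data(), classes, clauses, automatas, states);
    kernel.load_training_data(x_train.data(), y_train.data(), train_n);
    kernel.load_validation_data(x_val.data(), y_val.data(), val_n);

    // epochs=10, batches=1, validate each epoch, threshold=25, s=10.0,
    // print feedback, do not dump the model after every epoch.
    kernel.fit(10, 1, true, 25, 10.0f, true, false);
    return 0;
}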
db57a851355a4fc0fefc8110671ca13f31c00b99.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "kernels.hip"

static int read_data(float *A0, int nx, int ny, int nz)
{
    int s = 0;
    for (int i = 0; i < nz; i++) {
        for (int j = 0; j < ny; j++) {
            for (int k = 0; k < nx; k++) {
                A0[s] = 3;
                s++;
            }
        }
    }
    return 0;
}

int main(int argc, char **argv)
{
    int nx, ny, nz;
    int size;
    int iteration;
    float c0 = 1.0f / 6.0f;
    float c1 = 1.0f / 6.0f / 6.0f;

    if (argc < 5) {
        printf("Usage: probe nx ny nz t\n"
               "nx: the grid size x\n"
               "ny: the grid size y\n"
               "nz: the grid size z\n"
               "t: the iteration time\n");
        return -1;
    }

    nx = atoi(argv[1]);
    if (nx < 1) return -1;
    ny = atoi(argv[2]);
    if (ny < 1) return -1;
    nz = atoi(argv[3]);
    if (nz < 1) return -1;
    iteration = atoi(argv[4]);
    if (iteration < 1) return -1;

    float *h_A0;
    float *h_Anext;
    float *d_A0;
    float *d_Anext;

    size = nx * ny * nz;
    h_A0 = (float *)malloc(sizeof(float) * size);
    h_Anext = (float *)malloc(sizeof(float) * size);
    read_data(h_A0, nx, ny, nz);

    hipMalloc((void **)&d_A0, size * sizeof(float));
    hipMalloc((void **)&d_Anext, size * sizeof(float));
    hipMemset(d_Anext, 0, size * sizeof(float));
    hipMemcpy(d_A0, h_A0, size * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_Anext, d_A0, size * sizeof(float), hipMemcpyDeviceToDevice);

    int tx = 32;
    int ty = 4;
    dim3 block(tx, ty, 1);
    dim3 grid((nx + tx * 2 - 1) / (tx * 2), (ny + ty - 1) / ty, 1);
    int sh_size = tx * 2 * ty * sizeof(float);

    for (int t = 0; t < iteration; t++) {
        hipLaunchKernelGGL((block2D_hybrid_coarsen_x), dim3(grid), dim3(block), sh_size, 0,
                           c0, c1, d_A0, d_Anext, nx, ny, nz);
        float *d_temp = d_A0;
        d_A0 = d_Anext;
        d_Anext = d_temp;
    }

    float *d_temp = d_A0;
    d_A0 = d_Anext;
    d_Anext = d_temp;

    hipMemcpy(h_Anext, d_Anext, size * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(d_A0);
    hipFree(d_Anext);
    free(h_A0);
    free(h_Anext);
    return 0;
}
db57a851355a4fc0fefc8110671ca13f31c00b99.cu
#include <stdio.h>
#include <stdlib.h>
#include "kernels.cu"

static int read_data(float *A0, int nx, int ny, int nz)
{
    int s = 0;
    for (int i = 0; i < nz; i++) {
        for (int j = 0; j < ny; j++) {
            for (int k = 0; k < nx; k++) {
                A0[s] = 3;
                s++;
            }
        }
    }
    return 0;
}

int main(int argc, char **argv)
{
    int nx, ny, nz;
    int size;
    int iteration;
    float c0 = 1.0f / 6.0f;
    float c1 = 1.0f / 6.0f / 6.0f;

    if (argc < 5) {
        printf("Usage: probe nx ny nz t\n"
               "nx: the grid size x\n"
               "ny: the grid size y\n"
               "nz: the grid size z\n"
               "t: the iteration time\n");
        return -1;
    }

    nx = atoi(argv[1]);
    if (nx < 1) return -1;
    ny = atoi(argv[2]);
    if (ny < 1) return -1;
    nz = atoi(argv[3]);
    if (nz < 1) return -1;
    iteration = atoi(argv[4]);
    if (iteration < 1) return -1;

    float *h_A0;
    float *h_Anext;
    float *d_A0;
    float *d_Anext;

    size = nx * ny * nz;
    h_A0 = (float *)malloc(sizeof(float) * size);
    h_Anext = (float *)malloc(sizeof(float) * size);
    read_data(h_A0, nx, ny, nz);

    cudaMalloc((void **)&d_A0, size * sizeof(float));
    cudaMalloc((void **)&d_Anext, size * sizeof(float));
    cudaMemset(d_Anext, 0, size * sizeof(float));
    cudaMemcpy(d_A0, h_A0, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Anext, d_A0, size * sizeof(float), cudaMemcpyDeviceToDevice);

    int tx = 32;
    int ty = 4;
    dim3 block(tx, ty, 1);
    dim3 grid((nx + tx * 2 - 1) / (tx * 2), (ny + ty - 1) / ty, 1);
    int sh_size = tx * 2 * ty * sizeof(float);

    for (int t = 0; t < iteration; t++) {
        block2D_hybrid_coarsen_x<<<grid, block, sh_size>>>(c0, c1, d_A0, d_Anext, nx, ny, nz);
        float *d_temp = d_A0;
        d_A0 = d_Anext;
        d_Anext = d_temp;
    }

    float *d_temp = d_A0;
    d_A0 = d_Anext;
    d_Anext = d_temp;

    cudaMemcpy(h_Anext, d_Anext, size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_A0);
    cudaFree(d_Anext);
    free(h_A0);
    free(h_Anext);
    return 0;
}
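The launch geometry above uses the usual ceiling-division idiom, with the x dimension divided by 2*tx because each block appears to cover two tiles of columns (the kernel itself lives in kernels.cu and is not shown here). Below is a small worked example with the hard-coded tx=32, ty=4 and a hypothetical 800x800 slice; it only prints the numbers the expressions above produce.

// Worked example of the launch-geometry arithmetic used above (illustrative only).
#include <cstdio>

int main() {
    int nx = 800, ny = 800;
    int tx = 32, ty = 4;
    int grid_x = (nx + tx * 2 - 1) / (tx * 2);   // ceil(nx / (2*tx)) -> (800+63)/64 = 13
    int grid_y = (ny + ty - 1) / ty;             // ceil(ny / ty)     -> (800+3)/4  = 200
    int sh_size = tx * 2 * ty * sizeof(float);   // 32*2*4*4 = 1024 bytes of dynamic shared memory
    std::printf("grid = (%d, %d, 1), block = (%d, %d, 1), shared = %d bytes\n",
                grid_x, grid_y, tx, ty, sh_size);
    // 13 blocks of 2*32 columns cover 832 >= 800 columns; 200 blocks of 4 rows cover 800 rows.
    return 0;
}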
77e6e3d54e4f69acb3ea894d59bd682a49131d9c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void wave1Drusanov3(double *f_next, double *f_tmp, double *f_in,
                               double nu, double omega, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N) {
        int x_2m = tid - 2;
        if (x_2m < 0) x_2m += N;
        int x_m = tid - 1;
        if (x_m < 0) x_m += N;
        int x_p = tid + 1;
        if (x_p > (N - 1)) x_p -= N;
        int x_2p = tid + 2;
        if (x_2p > (N - 1)) x_2p -= N;

        double f_2m = f_in[x_2m];
        double f_m = f_in[x_m];
        double f = f_in[tid];
        double f_p = f_in[x_p];
        double f_2p = f_in[x_2p];

        f_next[tid] = f - (nu / 24.) * (-2. * f_2p + 7. * f_p - 7. * f_m + 2. * f_2m)
                        - (3. * nu / 8.) * (f_tmp[x_p] - f_tmp[x_m])
                        - (omega / 24.) * (f_2p - 4. * f_p + 6. * f - 4. * f_m + f_2m);
    }
}
77e6e3d54e4f69acb3ea894d59bd682a49131d9c.cu
__global__ void wave1Drusanov3(double *f_next, double *f_tmp, double *f_in,
                               double nu, double omega, int N)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < N) {
        int x_2m = tid - 2;
        if (x_2m < 0) x_2m += N;
        int x_m = tid - 1;
        if (x_m < 0) x_m += N;
        int x_p = tid + 1;
        if (x_p > (N - 1)) x_p -= N;
        int x_2p = tid + 2;
        if (x_2p > (N - 1)) x_2p -= N;

        double f_2m = f_in[x_2m];
        double f_m = f_in[x_m];
        double f = f_in[tid];
        double f_p = f_in[x_p];
        double f_2p = f_in[x_2p];

        f_next[tid] = f - (nu / 24.) * (-2. * f_2p + 7. * f_p - 7. * f_m + 2. * f_2m)
                        - (3. * nu / 8.) * (f_tmp[x_p] - f_tmp[x_m])
                        - (omega / 24.) * (f_2p - 4. * f_p + 6. * f - 4. * f_m + f_2m);
    }
}
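The file above only defines the kernel, so any host-side driver has to come from elsewhere. Below is a minimal, hypothetical driver: it assumes a periodic 1-D grid of N points, that f_tmp holds an intermediate stage of the time integrator, and placeholder values for N, nu and omega; none of these choices come from the original source.

// Hypothetical host driver for wave1Drusanov3; sizes and coefficients are assumptions.
#include <vector>
#include <cuda_runtime.h>

int main() {
    const int N = 1 << 16;
    const double nu = 0.5, omega = 0.01;

    std::vector<double> h_f(N, 0.0);            // the initial condition would be filled in here
    double *d_next = nullptr, *d_tmp = nullptr, *d_in = nullptr;
    cudaMalloc(&d_next, N * sizeof(double));
    cudaMalloc(&d_tmp,  N * sizeof(double));
    cudaMalloc(&d_in,   N * sizeof(double));
    cudaMemcpy(d_in,  h_f.data(), N * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_tmp, h_f.data(), N * sizeof(double), cudaMemcpyHostToDevice);

    // One thread per grid point, rounded up to full blocks.
    const int threads = 256;
    const int blocks = (N + threads - 1) / threads;
    wave1Drusanov3<<<blocks, threads>>>(d_next, d_tmp, d_in, nu, omega, N);
    cudaDeviceSynchronize();

    cudaMemcpy(h_f.data(), d_next, N * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_next);
    cudaFree(d_tmp);
    cudaFree(d_in);
    return 0;
}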
d3f49ed463017dbea150edec13697f29eef5b3f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <common/cuda_tools.hpp> namespace Scrfd{ static const int NUM_BOX_ELEMENT = 16; static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __device__ float sigmoid(float x){ return 1.0f / (1.0f + exp(-x)); } static __global__ void decode_kernel( float* predict, int num_bboxes, float deconfidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior_array ){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + 15 * position; // cx, cy, w, h, conf, keepflag, landmark0.x, landmark0.y, landmark1.x, landmark1.y, landmark2.x, landmark2.y float deconfidence = pitem[4]; if(deconfidence < deconfidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float* prior = prior_array + 4 * position; float dx = pitem[0] * prior[2]; float dy = pitem[1] * prior[2]; float dr = pitem[2] * prior[2]; float db = pitem[3] * prior[2]; float left = prior[0] - dx; float top = prior[1] - dy; float right = prior[0] + dr; float bottom = prior[1] + db; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = sigmoid(deconfidence); *pout_item++ = 1; // keepflag(1=keep, 0=ignore) float* landmark_predict = pitem + 5; for(int i = 0; i < 5; ++i){ float x = prior[0] + landmark_predict[0] * prior[2]; float y = prior[1] + landmark_predict[1] * prior[3]; affine_project(invert_affine_matrix, x, y, pout_item + 0, pout_item + 1); pout_item += 2; landmark_predict += 2; } } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ if(pitem[4] > pcurrent[4]){ // ioub > abia // pcurrent[5] = 0; // ignore return; } } } } static float desigmoid(float x){ return -log(1.0f / x - 1.0f); } void decode_kernel_invoker( float* predict, int num_bboxes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, float* prior, hipStream_t stream ){ auto grid = CUDATools::grid_dims(num_bboxes); auto block = CUDATools::block_dims(num_bboxes); hipLaunchKernelGGL(( 
decode_kernel), dim3(grid), dim3(block), 0, stream,
            predict, num_bboxes, desigmoid(confidence_threshold), nms_threshold,
            invert_affine_matrix, parray, max_objects, prior);

        grid = CUDATools::grid_dims(max_objects);
        block = CUDATools::block_dims(max_objects);
        hipLaunchKernelGGL((nms_kernel), dim3(grid), dim3(block), 0, stream,
            parray, max_objects, nms_threshold);
    }
};
d3f49ed463017dbea150edec13697f29eef5b3f3.cu
#include <common/cuda_tools.hpp>

namespace Scrfd{

    static const int NUM_BOX_ELEMENT = 16;

    static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){
        *ox = matrix[0] * x + matrix[1] * y + matrix[2];
        *oy = matrix[3] * x + matrix[4] * y + matrix[5];
    }

    static __device__ float sigmoid(float x){
        return 1.0f / (1.0f + exp(-x));
    }

    static __global__ void decode_kernel(
        float* predict, int num_bboxes, float deconfidence_threshold, float nms_threshold,
        float* invert_affine_matrix, float* parray, int max_objects, float* prior_array
    ){
        int position = blockDim.x * blockIdx.x + threadIdx.x;
        if (position >= num_bboxes) return;

        // raw prediction (stride 15): 4 box distances (scaled by the prior stride),
        // confidence logit, then 5 landmark (x, y) offsets
        float* pitem = predict + 15 * position;
        float deconfidence = pitem[4];
        if(deconfidence < deconfidence_threshold) return;

        int index = atomicAdd(parray, 1);
        if(index >= max_objects) return;

        float* prior = prior_array + 4 * position;
        float dx = pitem[0] * prior[2];
        float dy = pitem[1] * prior[2];
        float dr = pitem[2] * prior[2];
        float db = pitem[3] * prior[2];
        float left   = prior[0] - dx;
        float top    = prior[1] - dy;
        float right  = prior[0] + dr;
        float bottom = prior[1] + db;
        affine_project(invert_affine_matrix, left, top, &left, &top);
        affine_project(invert_affine_matrix, right, bottom, &right, &bottom);

        float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT;
        *pout_item++ = left;
        *pout_item++ = top;
        *pout_item++ = right;
        *pout_item++ = bottom;
        *pout_item++ = sigmoid(deconfidence);
        *pout_item++ = 1; // keepflag(1=keep, 0=ignore)

        float* landmark_predict = pitem + 5;
        for(int i = 0; i < 5; ++i){
            float x = prior[0] + landmark_predict[0] * prior[2];
            float y = prior[1] + landmark_predict[1] * prior[3];
            affine_project(invert_affine_matrix, x, y, pout_item + 0, pout_item + 1);
            pout_item += 2;
            landmark_predict += 2;
        }
    }

    static __device__ float box_iou(
        float aleft, float atop, float aright, float abottom,
        float bleft, float btop, float bright, float bbottom
    ){
        float cleft   = max(aleft, bleft);
        float ctop    = max(atop, btop);
        float cright  = min(aright, bright);
        float cbottom = min(abottom, bbottom);
        float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f);
        if(c_area == 0.0f) return 0.0f;

        float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop);
        float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop);
        return c_area / (a_area + b_area - c_area);
    }

    static __global__ void nms_kernel(float* bboxes, int max_objects, float threshold){
        int position = (blockDim.x * blockIdx.x + threadIdx.x);
        int count = min((int)*bboxes, max_objects);
        if (position >= count) return;

        float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT;
        for(int i = 0; i < count; ++i){
            float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT;
            if(i == position) continue;

            float iou = box_iou(
                pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3],
                pitem[0],    pitem[1],    pitem[2],    pitem[3]
            );
            if(iou > threshold){
                if(pitem[4] > pcurrent[4]){
                    // If the IoU exceeds the threshold and box i (b) has a higher confidence
                    // than the current box (a), the current box is suppressed and not kept.
                    pcurrent[5] = 0; // ignore
                    return;
                }
            }
        }
    }

    static float desigmoid(float x){
        return -log(1.0f / x - 1.0f);
    }

    void decode_kernel_invoker(
        float* predict, int num_bboxes, float confidence_threshold, float nms_threshold,
        float* invert_affine_matrix, float* parray, int max_objects, float* prior, cudaStream_t stream
    ){
        auto grid  = CUDATools::grid_dims(num_bboxes);
        auto block = CUDATools::block_dims(num_bboxes);
        checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(
            predict, num_bboxes, desigmoid(confidence_threshold), nms_threshold,
            invert_affine_matrix, parray, max_objects, prior
        ));

        grid  = CUDATools::grid_dims(max_objects);
        block = CUDATools::block_dims(max_objects);
        checkCudaKernel(nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold));
    }
};
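A minimal host-side sketch of how the buffer written by decode_kernel and nms_kernel above might be read back and filtered. It assumes only the layout the kernels use (parray[0] is the running count, followed by max_objects records of NUM_BOX_ELEMENT = 16 floats: left, top, right, bottom, confidence, keepflag, then five landmark x/y pairs); the struct and helper names below are illustrative and not part of the original code.

#include <cuda_runtime.h>
#include <algorithm>
#include <vector>

struct DecodedFace { float box[4]; float confidence; float landmarks[10]; };

// Copy the decoded array back and keep only the boxes that survived NMS (keepflag == 1).
static std::vector<DecodedFace> read_back_boxes(const float* parray_device, int max_objects, cudaStream_t stream){
    const int stride = 16;                                    // NUM_BOX_ELEMENT in the kernels above
    std::vector<float> host(1 + max_objects * stride);
    cudaMemcpyAsync(host.data(), parray_device, host.size() * sizeof(float),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    std::vector<DecodedFace> faces;
    int count = std::min((int)host[0], max_objects);          // the kernel clamps the same way
    for(int i = 0; i < count; ++i){
        const float* p = host.data() + 1 + i * stride;
        if(p[5] != 1.0f) continue;                            // keepflag cleared by nms_kernel
        DecodedFace f;
        std::copy(p, p + 4, f.box);                           // left, top, right, bottom
        f.confidence = p[4];
        std::copy(p + 6, p + 16, f.landmarks);                // five (x, y) pairs
        faces.push_back(f);
    }
    return faces;
}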
56817aa10d2bfcef80afee105caf752512c9ee98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc.hpp> #include <iostream> #include <time.h> #include <cmath> #define Height 800 #define Width 800 #define batch 400 #define trials 400 #define rate 0.05 #define ReCoord -0.761574 #define ImCoord -0.0847596 #define Intensity 0.5 #define Speed 1 #define debug false using namespace cv; using namespace std; typedef struct { double* N; double* I; double* posN; double* posI; }Com; double mapp(double i, double Mid, double Range, int size) { return (((i / double(size - 1)) - 0.5) * Range) + Mid; } double func(double i, int size, double offset, double magnification) { return (((i / double(size - 1)) + offset) / magnification); } __global__ void calculate(Com C, uint8_t* Conf) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; C.I[index] = 0; C.N[index] = 0; double tempN = C.N[index]; double sqrMagN = tempN * tempN; double sqrMagI = C.I[index] * C.I[index]; Conf[index] = 255; int i = 0; for (i = 0; i < trials; i++) { tempN = C.N[index]; C.N[index] = (sqrMagN - sqrMagI) + C.posN[index]; C.I[index] = (2 * C.I[index] * tempN) + C.posI[index]; sqrMagN = (C.N[index] * C.N[index]); sqrMagI = (C.I[index] * C.I[index]); if ((sqrMagN + sqrMagI) >= 4) { if (i * Intensity < 256) Conf[index] = (uint8_t)(i * Intensity); else Conf[index] = 255; break; } } } __global__ void initialize(Com C, int bat, double iteration) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; C.I[index] = 0; C.N[index] = 0; C.posN[index] = ((((double)threadIdx.x / double(Width - 1)) - 0.5) / iteration) + ReCoord; C.posI[index] = ((((double)(bat * batch + blockIdx.x) / double(Height - 1)) - 0.5) / iteration) + ImCoord; } void checkDevice() { int nDevices; hipGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); } } int main() { checkDevice(); //Image Variable Mat image = Mat::zeros(Height, Width, CV_8UC1); //Com C; Com c_C; uint8_t* Conf; uint8_t* c_Conf; size_t size = sizeof(double) * Height * batch; //C.I = (double*)malloc(size); //C.N = (double*)malloc(size); Conf = (uint8_t*)malloc(sizeof(uint8_t) * Height * batch); hipMalloc(&c_C.I, size); hipMalloc(&c_C.N, size); hipMalloc(&c_C.posI, size); hipMalloc(&c_C.posN, size); hipMalloc(&c_Conf, sizeof(uint8_t) * Height * batch); int z = 0; int iter = 0; while (!debug) { for (z = 0; z < (Width / batch); z+=Speed) { int i = 0; int j = 0; initialize << <batch, Height >> > (c_C, z, (double)pow(2, (rate * (double)iter))); hipDeviceSynchronize(); hipMemcpy(c_Conf, Conf, sizeof(uint8_t) * Height * batch, hipMemcpyHostToDevice); calculate << <batch, Height >> > (c_C, c_Conf); hipDeviceSynchronize(); //hipMemcpy(C.I, c_C.I, size, hipMemcpyDeviceToHost); //hipMemcpy(C.N, c_C.N, size, hipMemcpyDeviceToHost); hipMemcpy(Conf, c_Conf, sizeof(uint8_t) * Height * batch, hipMemcpyDeviceToHost); for (j = 0; j < batch; j++) { for (i = 0; i < Height; i++) { image.data[((z * batch + j) * Height + i)] = Conf[(j * Height + i)]; } } } imshow("Display Window", image); char c = (char)waitKey(25); if (c == 27) break; 
iter++; } hipFree(c_C.I); hipFree(c_C.N); hipFree(c_C.posI); hipFree(c_C.posN); hipFree(c_Conf); free(Conf); //free(C.I); //free(C.N); //free(C.posI); //free(C.posN); /* imshow("Display Window", image); waitKey(0); */ return 0; }
56817aa10d2bfcef80afee105caf752512c9ee98.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/imgproc.hpp> #include <iostream> #include <time.h> #include <cmath> #define Height 800 #define Width 800 #define batch 400 #define trials 400 #define rate 0.05 #define ReCoord -0.761574 #define ImCoord -0.0847596 #define Intensity 0.5 #define Speed 1 #define debug false using namespace cv; using namespace std; typedef struct { double* N; double* I; double* posN; double* posI; }Com; double mapp(double i, double Mid, double Range, int size) { return (((i / double(size - 1)) - 0.5) * Range) + Mid; } double func(double i, int size, double offset, double magnification) { return (((i / double(size - 1)) + offset) / magnification); } __global__ void calculate(Com C, uint8_t* Conf) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; C.I[index] = 0; C.N[index] = 0; double tempN = C.N[index]; double sqrMagN = tempN * tempN; double sqrMagI = C.I[index] * C.I[index]; Conf[index] = 255; int i = 0; for (i = 0; i < trials; i++) { tempN = C.N[index]; C.N[index] = (sqrMagN - sqrMagI) + C.posN[index]; C.I[index] = (2 * C.I[index] * tempN) + C.posI[index]; sqrMagN = (C.N[index] * C.N[index]); sqrMagI = (C.I[index] * C.I[index]); if ((sqrMagN + sqrMagI) >= 4) { if (i * Intensity < 256) Conf[index] = (uint8_t)(i * Intensity); else Conf[index] = 255; break; } } } __global__ void initialize(Com C, int bat, double iteration) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; C.I[index] = 0; C.N[index] = 0; C.posN[index] = ((((double)threadIdx.x / double(Width - 1)) - 0.5) / iteration) + ReCoord; C.posI[index] = ((((double)(bat * batch + blockIdx.x) / double(Height - 1)) - 0.5) / iteration) + ImCoord; } void checkDevice() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6); } } int main() { checkDevice(); //Image Variable Mat image = Mat::zeros(Height, Width, CV_8UC1); //Com C; Com c_C; uint8_t* Conf; uint8_t* c_Conf; size_t size = sizeof(double) * Height * batch; //C.I = (double*)malloc(size); //C.N = (double*)malloc(size); Conf = (uint8_t*)malloc(sizeof(uint8_t) * Height * batch); cudaMalloc(&c_C.I, size); cudaMalloc(&c_C.N, size); cudaMalloc(&c_C.posI, size); cudaMalloc(&c_C.posN, size); cudaMalloc(&c_Conf, sizeof(uint8_t) * Height * batch); int z = 0; int iter = 0; while (!debug) { for (z = 0; z < (Width / batch); z+=Speed) { int i = 0; int j = 0; initialize << <batch, Height >> > (c_C, z, (double)pow(2, (rate * (double)iter))); cudaDeviceSynchronize(); cudaMemcpy(c_Conf, Conf, sizeof(uint8_t) * Height * batch, cudaMemcpyHostToDevice); calculate << <batch, Height >> > (c_C, c_Conf); cudaDeviceSynchronize(); //cudaMemcpy(C.I, c_C.I, size, cudaMemcpyDeviceToHost); //cudaMemcpy(C.N, c_C.N, size, cudaMemcpyDeviceToHost); cudaMemcpy(Conf, c_Conf, sizeof(uint8_t) * Height * batch, cudaMemcpyDeviceToHost); for (j = 0; j < batch; j++) { for (i = 0; i < Height; i++) { image.data[((z * batch + j) * Height + i)] = Conf[(j * Height + i)]; } } } imshow("Display Window", image); char c = (char)waitKey(25); if (c == 27) break; iter++; } cudaFree(c_C.I); cudaFree(c_C.N); 
cudaFree(c_C.posI); cudaFree(c_C.posN); cudaFree(c_Conf); free(Conf); //free(C.I); //free(C.N); //free(C.posI); //free(C.posN); /* imshow("Display Window", image); waitKey(0); */ return 0; }
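The calculate kernel in both files above is the standard Mandelbrot escape-time iteration z <- z^2 + c, with bail-out at |z|^2 >= 4 and an Intensity-scaled 8-bit shade. A single-pixel CPU sketch of the same loop, kept only as a reference (the helper name is illustrative):

#include <cstdint>

// Escape-time shade for one complex point c = (re, im), mirroring the loop in `calculate`
// with the same trials = 400 and Intensity = 0.5 constants as the files above.
static uint8_t mandelbrot_shade(double re, double im) {
    double zr = 0.0, zi = 0.0;
    for (int i = 0; i < 400; ++i) {
        double tmp = zr;
        zr = zr * zr - zi * zi + re;          // z <- z^2 + c (real part)
        zi = 2.0 * zi * tmp + im;             // z <- z^2 + c (imaginary part)
        if (zr * zr + zi * zi >= 4.0)         // escaped: shade by iteration count
            return (i * 0.5 < 256.0) ? (uint8_t)(i * 0.5) : 255;
    }
    return 255;                               // never escaped within the trial budget
}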
3d248a30b02060663b55d7cfb315b4d0e2ba077e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cu_raycast.h" #include "MatUtils.h" #include "launch_utils.h" #include "InvalidValue.h" namespace loo { ////////////////////////////////////////////////////// // Phong shading. ////////////////////////////////////////////////////// __host__ __device__ inline float PhongShade(const float3 p_c, const float3 n_c) { const float ambient = 0.2; const float diffuse = 0.4; const float specular = 0.4; const float3 eyedir = -1.0f * p_c / length(p_c); const float3 _lightdir = make_float3(0.4,0.4,-1); const float3 lightdir = _lightdir / length(_lightdir); const float ldotn = dot(lightdir,n_c); const float3 lightreflect = 2*ldotn*n_c + (-1.0) * lightdir; const float edotr = fmaxf(0,dot(eyedir,lightreflect)); const float spec = edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr; return ambient + diffuse * ldotn + specular * spec; } ////////////////////////////////////////////////////// // Raycast SDF ////////////////////////////////////////////////////// __global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 c_w = SE3Translation(T_wc); //position of camera in world space const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); //camera // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; //(vol.bbox.Min() - c_w) voxel camera bound--> const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; ////(vol.bbox.Min() - c_w) voxel camera bound--> const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); //1000 const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); // 2200 float depth = 0.0f; // If ray intersects bounding box if(max_tmin < min_tmax ) { // Go between max_tmin and min_tmax float lambda = max_tmin; float last_sdf = InvalidValue<float>::Value(); float min_delta_lambda = vol.VoxelSizeUnits().x; float delta_lambda = 0; // March through space while(lambda < min_tmax) { const float3 pos_w = c_w + lambda * ray_w; const float sdf = vol.GetUnitsTrilinearClamped(pos_w); if( sdf <= 0 ) { if( last_sdf > 0) { // surface! if(subpix) { lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); } depth = lambda; } break; } delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; lambda += delta_lambda; last_sdf = sdf; } } // Compute normal const float3 pos_w = c_w + depth * ray_w; const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); const float len_n_w = length(_n_w); const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); const float3 n_c = mulSO3inv(T_wc,n_w); const float3 p_c = depth * ray_c; if(depth > 0 ) { imgdepth(u,v) = depth; img(u,v) = PhongShade(p_c, n_c); norm(u,v) = make_float4(n_c, 1); }else{ imgdepth(u,v) = InvalidValue<float>::Value(); img(u,v) = 0; norm(u,v) = make_float4(0,0,0,0); } } } void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, img); hipLaunchKernelGGL(( KernRaycastSdf), dim3(gridDim),dim3(blockDim), 0, 0, depth, norm, img, vol, T_wc, K, near, far, trunc_dist, subpix); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast Color SDF ////////////////////////////////////////////////////// __global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 c_w = SE3Translation(T_wc); const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); float depth = 0.0f; // If ray intersects bounding box if(max_tmin < min_tmax ) { // Go between max_tmin and min_tmax float lambda = max_tmin; float last_sdf = InvalidValue<float>::Value(); float min_delta_lambda = vol.VoxelSizeUnits().x; float delta_lambda = 0; // March through space while(lambda < min_tmax) { const float3 pos_w = c_w + lambda * ray_w; const float sdf = vol.GetUnitsTrilinearClamped(pos_w); if( sdf <= 0 ) { if( last_sdf > 0) { // surface! if(subpix) { lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); } depth = lambda; } break; } delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; lambda += delta_lambda; last_sdf = sdf; } } // Compute normal const float3 pos_w = c_w + depth * ray_w; const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); const float c = colorVol.GetUnitsTrilinearClamped(pos_w); const float len_n_w = length(_n_w); const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); const float3 n_c = mulSO3inv(T_wc,n_w); if(depth > 0 ) { imgdepth(u,v) = depth; img(u,v) = c; norm(u,v) = make_float4(n_c, 1); }else{ imgdepth(u,v) = InvalidValue<float>::Value(); img(u,v) = 0; norm(u,v) = make_float4(0,0,0,0); } } } void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { dim3 blockDim, gridDim; // InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16); InitDimFromOutputImageOver(blockDim, gridDim, img); hipLaunchKernelGGL(( KernRaycastSdf), dim3(gridDim),dim3(blockDim), 0, 0, depth, norm, img, vol, colorVol, T_wc, K, near, far, trunc_dist, subpix); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast box ////////////////////////////////////////////////////// __global__ void KernRaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < imgd.w && v < imgd.h ) { const float3 c_w = SE3Translation(T_wc); const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (bbox.Min() - c_w) / ray_w; const float3 tmaxbound = (bbox.Max() - c_w) / ray_w; const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z); const float min_tmax = fminf(fminf(tmax.x, tmax.y), tmax.z); float d; // If ray intersects bounding box if(max_tmin < min_tmax ) { d = max_tmin; }else{ d = InvalidValue<float>::Value(); } imgd(u,v) = d; } } void RaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox ) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, imgd); hipLaunchKernelGGL(( KernRaycastBox), dim3(gridDim),dim3(blockDim), 0, 0, imgd, T_wc, K, bbox); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast sphere ////////////////////////////////////////////////////// __global__ void KernRaycastSphere(Image<float> imgd, Image<float> img, ImageIntrinsics K, float3 center_c, float r) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < imgd.w && v < imgd.h ) { const float3 ray_c = K.Unproject(u,v); const float ldotc = dot(ray_c,center_c); const float lsq = dot(ray_c,ray_c); const float csq = dot(center_c,center_c); float depth = (ldotc - sqrt(ldotc*ldotc - lsq*(csq - r*r) )) / lsq; const float prev_depth = (imgd(u,v) == 0) ? 
INFINITY : imgd(u,v); if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { imgd(u,v) = depth; if(img.ptr) { const float3 p_c = depth * ray_c; const float3 n_c = p_c - center_c; img(u,v) = PhongShade(p_c, n_c / length(n_c)); } } } } void RaycastSphere(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, float3 center, float r) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, imgd); const float3 center_c = mulSE3inv(T_wc, center); hipLaunchKernelGGL(( KernRaycastSphere), dim3(gridDim),dim3(blockDim), 0, 0, imgd, img, K, center_c, r); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast plane ////////////////////////////////////////////////////// __global__ void KernRaycastPlane(Image<float> imgd, Image<float> img, ImageIntrinsics K, const float3 n_c) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 ray_c = K.Unproject(u,v); const float depth = -1 / dot(n_c, ray_c); const float prev_depth = imgd(u,v); if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { const float3 p_c = depth * ray_c; img(u,v) = PhongShade(p_c, n_c / length(n_c) ); imgd(u,v) = depth; } } } void RaycastPlane(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, const float3 n_w ) { const float3 n_c = Plane_b_from_a(T_wc, n_w); dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, img); hipLaunchKernelGGL(( KernRaycastPlane), dim3(gridDim),dim3(blockDim), 0, 0, imgd, img, K, n_c ); //GpuCheckErrors(); } }
3d248a30b02060663b55d7cfb315b4d0e2ba077e.cu
#include "cu_raycast.h" #include "MatUtils.h" #include "launch_utils.h" #include "InvalidValue.h" namespace loo { ////////////////////////////////////////////////////// // Phong shading. ////////////////////////////////////////////////////// __host__ __device__ inline float PhongShade(const float3 p_c, const float3 n_c) { const float ambient = 0.2; const float diffuse = 0.4; const float specular = 0.4; const float3 eyedir = -1.0f * p_c / length(p_c); const float3 _lightdir = make_float3(0.4,0.4,-1); const float3 lightdir = _lightdir / length(_lightdir); const float ldotn = dot(lightdir,n_c); const float3 lightreflect = 2*ldotn*n_c + (-1.0) * lightdir; const float edotr = fmaxf(0,dot(eyedir,lightreflect)); const float spec = edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr*edotr; return ambient + diffuse * ldotn + specular * spec; } ////////////////////////////////////////////////////// // Raycast SDF ////////////////////////////////////////////////////// __global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 c_w = SE3Translation(T_wc); //position of camera in world space const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); //将camera射线转化到世界坐标系下 // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; //(vol.bbox.Min() - c_w) 空间voxel的一角 与camera的连线 最小bound-->步长 const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; ////(vol.bbox.Min() - c_w) 空间voxel的一角 与camera的连线 最大bound-->步长 const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); //1000 const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); // 2200 float depth = 0.0f; // If ray intersects bounding box if(max_tmin < min_tmax ) { // Go between max_tmin and min_tmax float lambda = max_tmin; float last_sdf = InvalidValue<float>::Value(); float min_delta_lambda = vol.VoxelSizeUnits().x; float delta_lambda = 0; // March through space while(lambda < min_tmax) { const float3 pos_w = c_w + lambda * ray_w; const float sdf = vol.GetUnitsTrilinearClamped(pos_w); if( sdf <= 0 ) { if( last_sdf > 0) { // surface! if(subpix) { lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); } depth = lambda; } break; } delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; lambda += delta_lambda; last_sdf = sdf; } } // Compute normal const float3 pos_w = c_w + depth * ray_w; const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); const float len_n_w = length(_n_w); const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); const float3 n_c = mulSO3inv(T_wc,n_w); const float3 p_c = depth * ray_c; if(depth > 0 ) { imgdepth(u,v) = depth; img(u,v) = PhongShade(p_c, n_c); norm(u,v) = make_float4(n_c, 1); }else{ imgdepth(u,v) = InvalidValue<float>::Value(); img(u,v) = 0; norm(u,v) = make_float4(0,0,0,0); } } } void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, img); KernRaycastSdf<<<gridDim,blockDim>>>(depth, norm, img, vol, T_wc, K, near, far, trunc_dist, subpix); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast Color SDF ////////////////////////////////////////////////////// __global__ void KernRaycastSdf(Image<float> imgdepth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 c_w = SE3Translation(T_wc); const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (vol.bbox.Min() - c_w) / ray_w; const float3 tmaxbound = (vol.bbox.Max() - c_w) / ray_w; const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(fmaxf(tmin.x, tmin.y), tmin.z), near); const float min_tmax = fminf(fminf(fminf(tmax.x, tmax.y), tmax.z), far); float depth = 0.0f; // If ray intersects bounding box if(max_tmin < min_tmax ) { // Go between max_tmin and min_tmax float lambda = max_tmin; float last_sdf = InvalidValue<float>::Value(); float min_delta_lambda = vol.VoxelSizeUnits().x; float delta_lambda = 0; // March through space while(lambda < min_tmax) { const float3 pos_w = c_w + lambda * ray_w; const float sdf = vol.GetUnitsTrilinearClamped(pos_w); if( sdf <= 0 ) { if( last_sdf > 0) { // surface! if(subpix) { lambda = lambda + delta_lambda * sdf / (last_sdf - sdf); } depth = lambda; } break; } delta_lambda = sdf > 0 ? fmaxf(sdf, min_delta_lambda) : trunc_dist; lambda += delta_lambda; last_sdf = sdf; } } // Compute normal const float3 pos_w = c_w + depth * ray_w; const float3 _n_w = vol.GetUnitsBackwardDiffDxDyDz(pos_w); const float c = colorVol.GetUnitsTrilinearClamped(pos_w); const float len_n_w = length(_n_w); const float3 n_w = len_n_w > 0 ? 
_n_w / len_n_w : make_float3(0,0,1); const float3 n_c = mulSO3inv(T_wc,n_w); if(depth > 0 ) { imgdepth(u,v) = depth; img(u,v) = c; norm(u,v) = make_float4(n_c, 1); }else{ imgdepth(u,v) = InvalidValue<float>::Value(); img(u,v) = 0; norm(u,v) = make_float4(0,0,0,0); } } } void RaycastSdf(Image<float> depth, Image<float4> norm, Image<float> img, const BoundedVolume<SDF_t> vol, const BoundedVolume<float> colorVol, const Mat<float,3,4> T_wc, ImageIntrinsics K, float near, float far, float trunc_dist, bool subpix ) { dim3 blockDim, gridDim; // InitDimFromOutputImageOver(blockDim, gridDim, img, 16, 16); InitDimFromOutputImageOver(blockDim, gridDim, img); KernRaycastSdf<<<gridDim,blockDim>>>(depth, norm, img, vol, colorVol, T_wc, K, near, far, trunc_dist, subpix); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast box ////////////////////////////////////////////////////// __global__ void KernRaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox ) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < imgd.w && v < imgd.h ) { const float3 c_w = SE3Translation(T_wc); const float3 ray_c = K.Unproject(u,v); const float3 ray_w = mulSO3(T_wc, ray_c); // Raycast bounding box to find valid ray segment of sdf // http://www.cs.utah.edu/~awilliam/box/box.pdf const float3 tminbound = (bbox.Min() - c_w) / ray_w; const float3 tmaxbound = (bbox.Max() - c_w) / ray_w; const float3 tmin = fminf(tminbound,tmaxbound); const float3 tmax = fmaxf(tminbound,tmaxbound); const float max_tmin = fmaxf(fmaxf(tmin.x, tmin.y), tmin.z); const float min_tmax = fminf(fminf(tmax.x, tmax.y), tmax.z); float d; // If ray intersects bounding box if(max_tmin < min_tmax ) { d = max_tmin; }else{ d = InvalidValue<float>::Value(); } imgd(u,v) = d; } } void RaycastBox(Image<float> imgd, const Mat<float,3,4> T_wc, ImageIntrinsics K, const BoundingBox bbox ) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, imgd); KernRaycastBox<<<gridDim,blockDim>>>(imgd, T_wc, K, bbox); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast sphere ////////////////////////////////////////////////////// __global__ void KernRaycastSphere(Image<float> imgd, Image<float> img, ImageIntrinsics K, float3 center_c, float r) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < imgd.w && v < imgd.h ) { const float3 ray_c = K.Unproject(u,v); const float ldotc = dot(ray_c,center_c); const float lsq = dot(ray_c,ray_c); const float csq = dot(center_c,center_c); float depth = (ldotc - sqrt(ldotc*ldotc - lsq*(csq - r*r) )) / lsq; const float prev_depth = (imgd(u,v) == 0) ? 
INFINITY : imgd(u,v); if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { imgd(u,v) = depth; if(img.ptr) { const float3 p_c = depth * ray_c; const float3 n_c = p_c - center_c; img(u,v) = PhongShade(p_c, n_c / length(n_c)); } } } } void RaycastSphere(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, float3 center, float r) { dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, imgd); const float3 center_c = mulSE3inv(T_wc, center); KernRaycastSphere<<<gridDim,blockDim>>>(imgd, img, K, center_c, r); //GpuCheckErrors(); } ////////////////////////////////////////////////////// // Raycast plane ////////////////////////////////////////////////////// __global__ void KernRaycastPlane(Image<float> imgd, Image<float> img, ImageIntrinsics K, const float3 n_c) { const int u = blockIdx.x*blockDim.x + threadIdx.x; const int v = blockIdx.y*blockDim.y + threadIdx.y; if( u < img.w && v < img.h ) { const float3 ray_c = K.Unproject(u,v); const float depth = -1 / dot(n_c, ray_c); const float prev_depth = imgd(u,v); if(depth > 0 && (depth < prev_depth || !isfinite(prev_depth)) ) { const float3 p_c = depth * ray_c; img(u,v) = PhongShade(p_c, n_c / length(n_c) ); imgd(u,v) = depth; } } } void RaycastPlane(Image<float> imgd, Image<float> img, const Mat<float,3,4> T_wc, ImageIntrinsics K, const float3 n_w ) { const float3 n_c = Plane_b_from_a(T_wc, n_w); dim3 blockDim, gridDim; InitDimFromOutputImageOver(blockDim, gridDim, img); KernRaycastPlane<<<gridDim,blockDim>>>(imgd, img, K, n_c ); //GpuCheckErrors(); } }
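The depth computed in KernRaycastSphere above is the near root of the ray/sphere quadratic: with the camera at the origin and ray direction d, solving |t*d - c|^2 = r^2 for t gives t = (d.c - sqrt((d.c)^2 - |d|^2 (|c|^2 - r^2))) / |d|^2. A small host-side restatement of that formula (illustrative, not part of the file):

#include <cmath>

// Near intersection distance of the ray t*d (camera at origin) with a sphere of radius r
// centred at c, matching the expression in KernRaycastSphere. When the ray misses, the
// discriminant is negative and the result is NaN, which the kernel's depth > 0 test rejects.
static float ray_sphere_near(float dx, float dy, float dz,
                             float cx, float cy, float cz, float r) {
    float ldotc = dx * cx + dy * cy + dz * cz;   // d . c
    float lsq   = dx * dx + dy * dy + dz * dz;   // |d|^2
    float csq   = cx * cx + cy * cy + cz * cz;   // |c|^2
    return (ldotc - std::sqrt(ldotc * ldotc - lsq * (csq - r * r))) / lsq;
}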
64f2e6e03746c7d6859e8c59ab98a9b5a47f668a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void plus100Kernel(int *input, int* output)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < 10000) {
        output[i] = input[i] + 100;
    }
}

void plus100()
{
    int *d_input = 0;
    int *d_output = 0;
    hipMalloc((void**)&d_input, 10000 * sizeof(int));
    hipMalloc((void**)&d_output, 10000 * sizeof(int));

    srand(time(NULL));
    int* matrice = (int*)malloc(sizeof(int) * 10000);
    for (int i = 0; i < 10000; i++) {
        matrice[i] = rand() % 100;
    }

    // Copy to the device
    hipMemcpy(d_input, matrice, 10000 * sizeof(int), hipMemcpyHostToDevice);

    // Launch enough 256-thread blocks to cover all 10000 elements
    int threadsPerBlock = 256;
    int blocks = (10000 + threadsPerBlock - 1) / threadsPerBlock;
    hipLaunchKernelGGL((plus100Kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_input, d_output);

    // Wait for the kernel to finish, then copy back to the host
    hipDeviceSynchronize();
    hipMemcpy(matrice, d_output, 10000 * sizeof(int), hipMemcpyDeviceToHost);

    for (int i = 0; i < 10000; i++) {
        printf("%d\n", matrice[i]);
    }

    hipFree(d_input);
    hipFree(d_output);
    free(matrice);
}
64f2e6e03746c7d6859e8c59ab98a9b5a47f668a.cu
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void plus100Kernel(int *input, int* output)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < 10000) {
        output[i] = input[i] + 100;
    }
}

void plus100()
{
    int *d_input = 0;
    int *d_output = 0;
    cudaMalloc((void**)&d_input, 10000 * sizeof(int));
    cudaMalloc((void**)&d_output, 10000 * sizeof(int));

    srand(time(NULL));
    int* matrice = (int*)malloc(sizeof(int) * 10000);
    for (int i = 0; i < 10000; i++) {
        matrice[i] = rand() % 100;
    }

    // Copy to the device
    cudaMemcpy(d_input, matrice, 10000 * sizeof(int), cudaMemcpyHostToDevice);

    // Launch enough 256-thread blocks to cover all 10000 elements
    int threadsPerBlock = 256;
    int blocks = (10000 + threadsPerBlock - 1) / threadsPerBlock;
    plus100Kernel<<<blocks, threadsPerBlock>>>(d_input, d_output);

    // Wait for the kernel to finish, then copy back to the host
    cudaDeviceSynchronize();
    cudaMemcpy(matrice, d_output, 10000 * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 10000; i++) {
        printf("%d\n", matrice[i]);
    }

    cudaFree(d_input);
    cudaFree(d_output);
    free(matrice);
}
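Both plus100 files above size their launch with the usual ceil-division pattern, relying on the kernel's own `if (i < 10000)` guard for the final partial block. A short sketch of that pattern together with a post-launch error check (helper names are illustrative, not part of the files):

#include <cstdio>
#include <cuda_runtime.h>

// Enough blocks of `threadsPerBlock` threads to cover `n` elements; the kernel's bounds
// check handles the unused threads in the last block.
static inline dim3 blocks_for(int n, int threadsPerBlock) {
    return dim3((n + threadsPerBlock - 1) / threadsPerBlock);
}

// cudaGetLastError after a launch surfaces configuration or launch failures that would
// otherwise go unnoticed; a common companion to the sizing pattern above.
static inline void check_launch(const char* what) {
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
}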
39756d293cc0669356b95815387a0579a6778cc5.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r111;\n\t" " .reg .s32 %r112;\n\t" " .reg .s32 %r113;\n\t" " .reg .s32 %r114;\n\t" " .reg .s32 %r115;\n\t" " .reg .s32 %r116;\n\t" " .reg .s32 %r117;\n\t" " .reg .s32 %r118;\n\t" " .reg .s32 %r119;\n\t" " .reg .s32 %r120;\n\t" " .reg .s32 %r121;\n\t" " .reg .s32 %r122;\n\t" " .reg .s32 %r123;\n\t" " .reg .s32 %r124;\n\t" " .reg .s32 %r125;\n\t" " .reg .s32 %r126;\n\t" " .reg .s32 %r127;\n\t" " .reg .s32 %r128;\n\t" "mov.s32 %r112, 44;\n\t" "mov.s32 %r113, %r112;\n\t" "mov.s32 %r114, 22;\n\t" "mov.s32 %r115, 33;\n\t" "mov.s32 %r116, 123;\n\t" "mov.s32 %r117, 242;\n\t" "mov.s32 %r118, 334;\n\t" "mov.s32 %r119, 562;\n\t" "mov.s32 %r120, 256;\n\t" "mov.s32 %r121, 156;\n\t" "mov.s32 %r122, 256;\n\t" "mov.s32 %r123, 556;\n\t" "mov.s32 %r124, 856;\n\t" "mov.s32 %r125, 356;\n\t" "mov.s32 %r126, 556;\n\t" "mov.s32 %r127, 656;\n\t" "mov.s32 %r128, 56;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" 
"max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, 
%r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" 
"threads active per warp" << std::endl; } int main(int argc, char **argv) { if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); // hipMalloc((void**)&d_A, sizeof(float)); // hipMalloc((void**)&d_B, sizeof(float)); // hipMalloc((void**)&d_C, sizeof(float)); hipMalloc((void**)&d_res, sizeof(float)); // hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); // hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations); hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; hipEventDestroy(start); hipEventDestroy(stop); hipDeviceSynchronize(); hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost); return 0; }
39756d293cc0669356b95815387a0579a6778cc5.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r111;\n\t" " .reg .s32 %r112;\n\t" " .reg .s32 %r113;\n\t" " .reg .s32 %r114;\n\t" " .reg .s32 %r115;\n\t" " .reg .s32 %r116;\n\t" " .reg .s32 %r117;\n\t" " .reg .s32 %r118;\n\t" " .reg .s32 %r119;\n\t" " .reg .s32 %r120;\n\t" " .reg .s32 %r121;\n\t" " .reg .s32 %r122;\n\t" " .reg .s32 %r123;\n\t" " .reg .s32 %r124;\n\t" " .reg .s32 %r125;\n\t" " .reg .s32 %r126;\n\t" " .reg .s32 %r127;\n\t" " .reg .s32 %r128;\n\t" "mov.s32 %r112, 44;\n\t" "mov.s32 %r113, %r112;\n\t" "mov.s32 %r114, 22;\n\t" "mov.s32 %r115, 33;\n\t" "mov.s32 %r116, 123;\n\t" "mov.s32 %r117, 242;\n\t" "mov.s32 %r118, 334;\n\t" "mov.s32 %r119, 562;\n\t" "mov.s32 %r120, 256;\n\t" "mov.s32 %r121, 156;\n\t" "mov.s32 %r122, 256;\n\t" "mov.s32 %r123, 556;\n\t" "mov.s32 %r124, 856;\n\t" "mov.s32 %r125, 356;\n\t" "mov.s32 %r126, 556;\n\t" "mov.s32 %r127, 656;\n\t" "mov.s32 %r128, 56;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" 
"max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, 
%r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" "max.s32 %r113, %r111, %r113;\n\t" "max.s32 %r114, %r111, %r114;\n\t" "max.s32 %r115, %r111, %r115;\n\t" "max.s32 %r116, %r111, %r116;\n\t" "max.s32 %r117, %r111, %r117;\n\t" "max.s32 %r118, %r111, %r118;\n\t" "max.s32 %r119, %r111, %r119;\n\t" "max.s32 %r120, %r111, %r120;\n\t" "max.s32 %r121, %r111, %r121;\n\t" "max.s32 %r122, %r111, %r122;\n\t" "max.s32 %r123, %r111, %r123;\n\t" "max.s32 %r124, %r111, %r124;\n\t" "max.s32 %r125, %r111, %r125;\n\t" "max.s32 %r126, %r111, %r126;\n\t" "max.s32 %r127, %r111, %r127;\n\t" "max.s32 %r128, %r111, %r128;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } int main(int argc, char **argv) 
{ if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); // cudaMalloc((void**)&d_A, sizeof(float)); // cudaMalloc((void**)&d_B, sizeof(float)); // cudaMalloc((void**)&d_C, sizeof(float)); cudaMalloc((void**)&d_res, sizeof(float)); h_res = new float(0.0f); // cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); // compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations); compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; cudaEventDestroy(start); cudaEventDestroy(stop); cudaDeviceSynchronize(); cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost); return 0; }
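In the benchmark above, `thread_id = threadIdx.x % 32` and `if (thread_id < div)` mean that only the first `div` lanes of each 32-lane warp execute the max.s32 chains while the remaining lanes are masked off for the duration of the branch. A trivial sketch of the resulting active-lane fraction per warp (illustrative):

#include <algorithm>

// Fraction of lanes in a 32-wide warp that take the if-body for a given divergence argument.
static inline double active_lane_fraction(int divergence) {
    return std::min(std::max(divergence, 0), 32) / 32.0;
}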
8c96ee19e7e8c34597a921a45e62be8ddf476630.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <functional> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "glut_functions.cuh" #include "kernels_hip.cuh" namespace glf = glut_functions; int main(int argc, char** argv) { glf::window = new Window(data::BOIDS_COUNT); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB); glutInitWindowSize(data::WIDTH, data::HEIGHT); glutCreateWindow("Boids"); glShadeModel(GL_FLAT); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glutDisplayFunc(glf::display); glutReshapeFunc(glf::reshape); glutKeyboardFunc(glf::keyboardLetters); glutMouseFunc(glf::mouse); glutPassiveMotionFunc(glf::mousePassive); atexit(glf::exitingFunction); glutTimerFunc(0, glf::onTimer, 0); glutMainLoop(); return 0; }
8c96ee19e7e8c34597a921a45e62be8ddf476630.cu
#include <stdio.h> #include <functional> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "glut_functions.cuh" #include "kernels.cuh" namespace glf = glut_functions; int main(int argc, char** argv) { glf::window = new Window(data::BOIDS_COUNT); glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB); glutInitWindowSize(data::WIDTH, data::HEIGHT); glutCreateWindow("Boids"); glShadeModel(GL_FLAT); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glutDisplayFunc(glf::display); glutReshapeFunc(glf::reshape); glutKeyboardFunc(glf::keyboardLetters); glutMouseFunc(glf::mouse); glutPassiveMotionFunc(glf::mousePassive); atexit(glf::exitingFunction); glutTimerFunc(0, glf::onTimer, 0); glutMainLoop(); return 0; }
0a4f8eeac77bd664ce8bbbd61f1abcae2fc6372f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; int labeled_target_size = 0; int num = 32 + labeled_target_size; const int nthreads = num * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, num, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= num; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * 
sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; int labeled_target_size = 0; int num = 32 + labeled_target_size; const int nthreads = num * inner_num_; // Since this memory is never used for anything else, // we use it to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, num, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
0a4f8eeac77bd664ce8bbbd61f1abcae2fc6372f.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; int labeled_target_size = 0; int num = 32 + labeled_target_size; const int nthreads = num * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, num, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= num; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; int 
labeled_target_size = 0; int num = 32 + labeled_target_size; const int nthreads = num * inner_num_; // Since this memory is never used for anything else, // we use it to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, num, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / num, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
d6776e21d23209d4be396d7738971d80e9051555.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2018 Wei Dai <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ // Include these two files for CPU computing. #include <include/cufhe_gpu.cuh> using namespace cufhe; #include <iostream> using namespace std; #include <hip/hip_runtime_api.h> Ctxt cufhe::ct_zero; Ctxt cufhe::ct_one; // Initialize a plaintext array void init_ptxt(Ptxt* p, int8_t x, uint8_t n) { for (int i = 0; i < n; i++) { p[i].message_ = x & 0x1; x >>= 1; } } int8_t dump_ptxt(Ptxt* p, uint8_t n) { int8_t out = 0; for (int i = n-1; i >= 0; i--) { cout<<p[i].message_; out |= p[i].message_ << i; } cout<<endl; return out; } int main() { uint8_t N = 8; SetSeed(); // set random seed // plaintext Ptxt* pta = new Ptxt[N]; // input a Ptxt* ptb = new Ptxt[N]; // input b Ptxt* ptz = new Ptxt[N]; // output Ptxt* pts = new Ptxt; Ctxt* cta = new Ctxt[N]; // input a Ctxt* ctb = new Ctxt[N]; // input b Ctxt* ctz = new Ctxt[N]; // output Ctxt* ctc = new Ctxt[N]; // carry Ctxt* ctt = new Ctxt[10*N+1]; Ctxt* cts = new Ctxt; char filename[11]; /*float et; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop);*/ cout<< "------ Open Key ------" <<endl; PriKey pri_key; ReadPriKeyFromFile(pri_key, "pri_key.txt"); Encrypt(*cts, *pts, pri_key); Ptxt* pt_one = new Ptxt; Ptxt* pt_zero = new Ptxt; init_ptxt(pt_zero, 0, 1); init_ptxt(pt_one, 1, 1); Encrypt(ct_zero, *pt_zero, pri_key); Encrypt(ct_one, *pt_one, pri_key); // Calculate /*cout<< "Calculating..."<<endl; hipProfilerStart(); hipEventRecord(start, 0); Add(ctz, ctc, cta, ctb, ctt, st, N); Synchronize(); hipEventRecord(stop, 0); hipEventSynchronize(stop); */ // Decrypt for (int i = N-1; i >= 0; i--) { snprintf(filename, 11, "answer%d.txt", i); ReadCtxtFromFile(ctz[i],filename); } cout<< "Decrypting"<<endl; for (int i = N-1; i >= 0; i--) { Decrypt(ptz[i], ctz[i], pri_key); } cout<<"A + B = "<<int(dump_ptxt(ptz, N))<<endl; /*hipEventElapsedTime(&et, start, stop); cout<<"Elapsed: "<<et<<" ms"<<endl;*/ Decrypt(pta[0], ctc[N-1], pri_key); cout<<"carry out: "<<pta[0].message_<<endl; delete [] pta; delete [] ptb; delete [] ptz; delete [] cta; delete [] ctb; delete [] ctz; delete [] ctc; return 0; }
d6776e21d23209d4be396d7738971d80e9051555.cu
/** * Copyright 2018 Wei Dai <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ // Include these two files for CPU computing. #include <include/cufhe_gpu.cuh> using namespace cufhe; #include <iostream> using namespace std; #include <cuda_profiler_api.h> Ctxt cufhe::ct_zero; Ctxt cufhe::ct_one; // Initialize a plaintext array void init_ptxt(Ptxt* p, int8_t x, uint8_t n) { for (int i = 0; i < n; i++) { p[i].message_ = x & 0x1; x >>= 1; } } int8_t dump_ptxt(Ptxt* p, uint8_t n) { int8_t out = 0; for (int i = n-1; i >= 0; i--) { cout<<p[i].message_; out |= p[i].message_ << i; } cout<<endl; return out; } int main() { uint8_t N = 8; SetSeed(); // set random seed // plaintext Ptxt* pta = new Ptxt[N]; // input a Ptxt* ptb = new Ptxt[N]; // input b Ptxt* ptz = new Ptxt[N]; // output Ptxt* pts = new Ptxt; Ctxt* cta = new Ctxt[N]; // input a Ctxt* ctb = new Ctxt[N]; // input b Ctxt* ctz = new Ctxt[N]; // output Ctxt* ctc = new Ctxt[N]; // carry Ctxt* ctt = new Ctxt[10*N+1]; Ctxt* cts = new Ctxt; char filename[11]; /*float et; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop);*/ cout<< "------ Open Key ------" <<endl; PriKey pri_key; ReadPriKeyFromFile(pri_key, "pri_key.txt"); Encrypt(*cts, *pts, pri_key); Ptxt* pt_one = new Ptxt; Ptxt* pt_zero = new Ptxt; init_ptxt(pt_zero, 0, 1); init_ptxt(pt_one, 1, 1); Encrypt(ct_zero, *pt_zero, pri_key); Encrypt(ct_one, *pt_one, pri_key); // Calculate /*cout<< "Calculating..."<<endl; cudaProfilerStart(); cudaEventRecord(start, 0); Add(ctz, ctc, cta, ctb, ctt, st, N); Synchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); */ // Decrypt for (int i = N-1; i >= 0; i--) { snprintf(filename, 11, "answer%d.txt", i); ReadCtxtFromFile(ctz[i],filename); } cout<< "Decrypting"<<endl; for (int i = N-1; i >= 0; i--) { Decrypt(ptz[i], ctz[i], pri_key); } cout<<"A + B = "<<int(dump_ptxt(ptz, N))<<endl; /*cudaEventElapsedTime(&et, start, stop); cout<<"Elapsed: "<<et<<" ms"<<endl;*/ Decrypt(pta[0], ctc[N-1], pri_key); cout<<"carry out: "<<pta[0].message_<<endl; delete [] pta; delete [] ptb; delete [] ptz; delete [] cta; delete [] ctb; delete [] ctz; delete [] ctc; return 0; }
5db2aa2042c77f0498076bb5b5f3c3b95a02d221.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <assert.h> #define VERBOSE #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } #define NDIMS 2 __global__ void kernel1(float *dranks, int *dlinks, int *dlink_counts, float *dlink_weights, int nLinks, int start, int end, int GPUN) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (start <= id && id <= end) { dlink_weights[id-start] = dranks[dlinks[id*NDIMS+0]-1] / (float) dlink_counts[dlinks[id*NDIMS+0]-1]; } } __global__ void kernel2(float *dranks, int *dlinks, float *dlink_weights, int nDocs, int nLinks, int start, int end, int GPUN) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (start <= id && id <= end) { float new_rank = 0.0f; // look for links pointing to this document for (int l = 0; l < nLinks; l++) { int dst = dlinks[l*NDIMS+1] - 1; if (dst == id) { new_rank += dlink_weights[l]; } } dranks[id-start] = new_rank; } } extern "C" { void prCUDA1(float* ranks, int *links, int *link_counts, float *link_weights, int nDocs, int nLinks, int start, int end, int GPUN) { float *dranks, *dlink_weights; int *dlinks, *dlink_counts; if (GPUN > 0) { assert(end - start + 1 == GPUN); #ifdef VERBOSE printf("In prCUDA1\n"); printf("\t GPUN: %d\n", GPUN); printf("\t range: %d..%d\n", start, end); #endif CudaSafeCall(hipMalloc(&dranks, sizeof(float) * nDocs)); CudaSafeCall(hipMalloc(&dlinks, sizeof(int) * nLinks * 2)); CudaSafeCall(hipMalloc(&dlink_counts, sizeof(int) * nDocs)); CudaSafeCall(hipMalloc(&dlink_weights, sizeof(float) * nLinks)); CudaSafeCall(hipMemcpy(dranks, ranks, sizeof(float) * nDocs, hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(dlinks, links, sizeof(int) * nLinks * 2, hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(dlink_counts, link_counts, sizeof(int) * nDocs, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel1), dim3(ceil(((float)nLinks)/1024)), dim3(1024), 0, 0, dranks, dlinks, dlink_counts, dlink_weights, nLinks, start, end, GPUN); CudaSafeCall(hipDeviceSynchronize()); CudaSafeCall(hipMemcpy(link_weights + start, dlink_weights, sizeof(float) * GPUN, hipMemcpyDeviceToHost)); CudaSafeCall(hipFree(dranks)); CudaSafeCall(hipFree(dlinks)); CudaSafeCall(hipFree(dlink_counts)); CudaSafeCall(hipFree(dlink_weights)); } } void prCUDA2(float* ranks, int *links, float *link_weights, int nDocs, int nLinks, int start, int end, int GPUN) { float *dranks, *dlink_weights; int *dlinks; if (GPUN > 0) { assert(end - start + 1 == GPUN); #ifdef VERBOSE printf("In prCUDA2\n"); printf("\t GPUN: %d\n", GPUN); printf("\t range: %d..%d\n", start, end); #endif CudaSafeCall(hipMalloc(&dranks, sizeof(float) * GPUN)); CudaSafeCall(hipMalloc(&dlinks, sizeof(int) * nLinks * 2)); CudaSafeCall(hipMalloc(&dlink_weights, sizeof(float) * nLinks)); CudaSafeCall(hipMemcpy(dlinks, links, sizeof(int) * nLinks * 2, hipMemcpyHostToDevice)); CudaSafeCall(hipMemcpy(dlink_weights, link_weights, sizeof(float) * nLinks, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel2), dim3(ceil(((float)nDocs)/1024)), dim3(1024), 0, 0, dranks, dlinks, dlink_weights, nDocs, nLinks, start, end, GPUN); CudaSafeCall(hipDeviceSynchronize()); CudaSafeCall(hipMemcpy(ranks + 
start, dranks, sizeof(float) * GPUN, hipMemcpyDeviceToHost)); CudaSafeCall(hipFree(dranks)); CudaSafeCall(hipFree(dlinks)); CudaSafeCall(hipFree(dlink_weights)); } } }
5db2aa2042c77f0498076bb5b5f3c3b95a02d221.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <assert.h> #define VERBOSE #define CUDA_ERROR_CHECK #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ ) inline void __cudaSafeCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } #define NDIMS 2 __global__ void kernel1(float *dranks, int *dlinks, int *dlink_counts, float *dlink_weights, int nLinks, int start, int end, int GPUN) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (start <= id && id <= end) { dlink_weights[id-start] = dranks[dlinks[id*NDIMS+0]-1] / (float) dlink_counts[dlinks[id*NDIMS+0]-1]; } } __global__ void kernel2(float *dranks, int *dlinks, float *dlink_weights, int nDocs, int nLinks, int start, int end, int GPUN) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (start <= id && id <= end) { float new_rank = 0.0f; // look for links pointing to this document for (int l = 0; l < nLinks; l++) { int dst = dlinks[l*NDIMS+1] - 1; if (dst == id) { new_rank += dlink_weights[l]; } } dranks[id-start] = new_rank; } } extern "C" { void prCUDA1(float* ranks, int *links, int *link_counts, float *link_weights, int nDocs, int nLinks, int start, int end, int GPUN) { float *dranks, *dlink_weights; int *dlinks, *dlink_counts; if (GPUN > 0) { assert(end - start + 1 == GPUN); #ifdef VERBOSE printf("In prCUDA1\n"); printf("\t GPUN: %d\n", GPUN); printf("\t range: %d..%d\n", start, end); #endif CudaSafeCall(cudaMalloc(&dranks, sizeof(float) * nDocs)); CudaSafeCall(cudaMalloc(&dlinks, sizeof(int) * nLinks * 2)); CudaSafeCall(cudaMalloc(&dlink_counts, sizeof(int) * nDocs)); CudaSafeCall(cudaMalloc(&dlink_weights, sizeof(float) * nLinks)); CudaSafeCall(cudaMemcpy(dranks, ranks, sizeof(float) * nDocs, cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(dlinks, links, sizeof(int) * nLinks * 2, cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(dlink_counts, link_counts, sizeof(int) * nDocs, cudaMemcpyHostToDevice)); kernel1<<<ceil(((float)nLinks)/1024), 1024>>>(dranks, dlinks, dlink_counts, dlink_weights, nLinks, start, end, GPUN); CudaSafeCall(cudaDeviceSynchronize()); CudaSafeCall(cudaMemcpy(link_weights + start, dlink_weights, sizeof(float) * GPUN, cudaMemcpyDeviceToHost)); CudaSafeCall(cudaFree(dranks)); CudaSafeCall(cudaFree(dlinks)); CudaSafeCall(cudaFree(dlink_counts)); CudaSafeCall(cudaFree(dlink_weights)); } } void prCUDA2(float* ranks, int *links, float *link_weights, int nDocs, int nLinks, int start, int end, int GPUN) { float *dranks, *dlink_weights; int *dlinks; if (GPUN > 0) { assert(end - start + 1 == GPUN); #ifdef VERBOSE printf("In prCUDA2\n"); printf("\t GPUN: %d\n", GPUN); printf("\t range: %d..%d\n", start, end); #endif CudaSafeCall(cudaMalloc(&dranks, sizeof(float) * GPUN)); CudaSafeCall(cudaMalloc(&dlinks, sizeof(int) * nLinks * 2)); CudaSafeCall(cudaMalloc(&dlink_weights, sizeof(float) * nLinks)); CudaSafeCall(cudaMemcpy(dlinks, links, sizeof(int) * nLinks * 2, cudaMemcpyHostToDevice)); CudaSafeCall(cudaMemcpy(dlink_weights, link_weights, sizeof(float) * nLinks, cudaMemcpyHostToDevice)); kernel2<<<ceil(((float)nDocs)/1024), 1024>>>(dranks, dlinks, dlink_weights, nDocs, nLinks, start, end, GPUN); CudaSafeCall(cudaDeviceSynchronize()); CudaSafeCall(cudaMemcpy(ranks + start, dranks, sizeof(float) * GPUN, cudaMemcpyDeviceToHost)); CudaSafeCall(cudaFree(dranks)); CudaSafeCall(cudaFree(dlinks)); 
CudaSafeCall(cudaFree(dlink_weights)); } } }
1180a3c21aca983e78a6d533e96912f3ae236d09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "util.h" const char inputName256one[] = "data/input_one_14_1024.bin"; const char weightName256one[] = "data/weight_one_1024.bin"; const char bnBias_myKernel_Name256one[] = "data/bnBias_myKernel_one_1024.bin"; const char bnScale_myKernel_Name256one[] = "data/bnScale_myKernel_one_1024.bin"; __global__ void kernel_1024_one_256( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *__restrict__ weights = shared_ + 1024*4, *__restrict__ output = weights + 256*16, *__restrict__ input = shared_; float *__restrict__ bias = output + 4*256, *__restrict__ scale = bias + 256; for (int i = 0; i < 4; i++) input[ind + i*1024] = A[tile*4096 + i*1024 + ind]; bias[in_channel] = bnBias[in_channel]; scale[in_channel] = bnScale[in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 1024; k += 16) { const float *B_start = B + k*256; for (int i = 0; i < 4; i++) weights[ind + i*1024] = B_start[i*1024 + ind]; __syncthreads(); const float *A_start = input + k; for (int p = 0; p < 16; p++) { output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel]; C_start[ind] = res > 0 ? res : 0; } __global__ void kernel_256_one_1024( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_; float *bias = output + 4*256, *scale = bias + 256; input[ind] = A[tile * 1024 + ind]; bias[in_channel] = bnBias[part*256 + in_channel]; scale[in_channel] = bnScale[part*256+ in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 256; k += 32) { for (int i = 0; i < 8; i++) weights[ind + 1024*i] = B[(k + i*4 + line)*1024 + part*256 + in_channel]; __syncthreads(); float *A_start = input + k; for (int p = 0; p < 32; p++) { output[ind] += A_start[line*256 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*4096 + part*256; C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel]; } void kernel_256_1_in(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*1024); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 256<<2); hipMalloc((void **) &bnScale_, 256<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 256<<2, hipMemcpyHostToDevice); 
hipMemcpy(bnScale_, bnScale_myKernel, 256<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( kernel_1024_one_256) , dim3(dim3(49)), dim3(dim3(256, 4)), (4*1024 + 16*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(input); free(weight); free(bnBias_myKernel); free(bnScale_myKernel); } void kernel_256_1_out(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*256); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 1024<<2); hipMalloc((void **) &bnScale_, 1024<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 1024<<2, hipMemcpyHostToDevice); hipMemcpy(bnScale_, bnScale_myKernel, 1024<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( kernel_256_one_1024) , dim3(dim3(49, 4)), dim3(dim3(256, 4)), (4*256 + 32*256 + 4*256 + 2*256)<<2 , 0, 0, input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(bnBias_myKernel); free(bnScale_myKernel); free(input); free(weight); }
1180a3c21aca983e78a6d533e96912f3ae236d09.cu
#include "util.h" const char inputName256one[] = "data/input_one_14_1024.bin"; const char weightName256one[] = "data/weight_one_1024.bin"; const char bnBias_myKernel_Name256one[] = "data/bnBias_myKernel_one_1024.bin"; const char bnScale_myKernel_Name256one[] = "data/bnScale_myKernel_one_1024.bin"; __global__ void kernel_1024_one_256( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *__restrict__ weights = shared_ + 1024*4, *__restrict__ output = weights + 256*16, *__restrict__ input = shared_; float *__restrict__ bias = output + 4*256, *__restrict__ scale = bias + 256; for (int i = 0; i < 4; i++) input[ind + i*1024] = A[tile*4096 + i*1024 + ind]; bias[in_channel] = bnBias[in_channel]; scale[in_channel] = bnScale[in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 1024; k += 16) { const float *B_start = B + k*256; for (int i = 0; i < 4; i++) weights[ind + i*1024] = B_start[i*1024 + ind]; __syncthreads(); const float *A_start = input + k; for (int p = 0; p < 16; p++) { output[ind] += A_start[line*1024 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*1024, res = scale[in_channel] * output[ind] + bias[in_channel]; C_start[ind] = res > 0 ? res : 0; } __global__ void kernel_256_one_1024( const float *__restrict__ A, const float *__restrict__ B, const float *__restrict__ bnBias, const float *__restrict__ bnScale, float *__restrict__ C) { int tile = blockIdx.x, part = blockIdx.y, in_channel = threadIdx.x, line = threadIdx.y; int ind = line*256 + in_channel; extern __shared__ float shared_[]; float *weights = shared_ + 256*4, *output = weights + 256*32, *input = shared_; float *bias = output + 4*256, *scale = bias + 256; input[ind] = A[tile * 1024 + ind]; bias[in_channel] = bnBias[part*256 + in_channel]; scale[in_channel] = bnScale[part*256+ in_channel]; output[ind] = 0.0f; __syncthreads(); for (int k = 0; k < 256; k += 32) { for (int i = 0; i < 8; i++) weights[ind + 1024*i] = B[(k + i*4 + line)*1024 + part*256 + in_channel]; __syncthreads(); float *A_start = input + k; for (int p = 0; p < 32; p++) { output[ind] += A_start[line*256 + p] * weights[in_channel + p*256]; } __syncthreads(); } float *C_start = C + tile*4096 + part*256; C_start[line * 1024 + in_channel] = scale[in_channel] * output[ind] + bias[in_channel]; } void kernel_256_1_in(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*1024); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 256); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 256); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*1024, nOutput = 14*14*256, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 256<<2); hipMalloc((void **) &bnScale_, 256<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 256<<2, hipMemcpyHostToDevice); hipMemcpy(bnScale_, bnScale_myKernel, 256<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); 
auto kstart = std::chrono::steady_clock::now(); kernel_1024_one_256 <<<dim3(49), dim3(256, 4), (4*1024 + 16*256 + 4*256 + 2*256)<<2 >>> ( input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(input); free(weight); free(bnBias_myKernel); free(bnScale_myKernel); } void kernel_256_1_out(double &time, double &ktime) { float *input = get_parameter(inputName256one, 14*14*256); float *weight = get_parameter(weightName256one, 256*1024); float *bnBias_myKernel = get_parameter(bnBias_myKernel_Name256one, 1024); float *bnScale_myKernel = get_parameter(bnScale_myKernel_Name256one, 1024); float *input_, *output_, *weight_, *bnBias_, *bnScale_; int nInput = 14*14*256, nOutput = 14*14*1024, nWeights = 256*1024; float result[nOutput]; auto start = std::chrono::steady_clock::now(); hipMalloc((void **) &input_, nInput<<2); hipMalloc((void **) &output_, nOutput<<2); hipMalloc((void **) &weight_, nWeights<<2); hipMalloc((void **) &bnBias_, 1024<<2); hipMalloc((void **) &bnScale_, 1024<<2); hipMemcpy(input_, input, nInput<<2, hipMemcpyHostToDevice); hipMemcpy(weight_, weight, nWeights<<2, hipMemcpyHostToDevice); hipMemcpy(bnBias_, bnBias_myKernel, 1024<<2, hipMemcpyHostToDevice); hipMemcpy(bnScale_, bnScale_myKernel, 1024<<2, hipMemcpyHostToDevice); hipDeviceSynchronize(); auto kstart = std::chrono::steady_clock::now(); kernel_256_one_1024 <<<dim3(49, 4), dim3(256, 4), (4*256 + 32*256 + 4*256 + 2*256)<<2 >>>( input_, weight_, bnBias_, bnScale_, output_); hipDeviceSynchronize(); auto kend = std::chrono::steady_clock::now(); ktime = std::chrono::duration_cast<std::chrono::nanoseconds>(kend - kstart).count(); hipMemcpy(result, output_, nOutput<<2, hipMemcpyDeviceToHost); hipFree(input_); hipFree(output_); hipFree(weight_); hipFree(bnScale_); hipFree(bnBias_); auto end = std::chrono::steady_clock::now(); time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count(); #ifdef DEBUG double s = 0; for (int i = 0; i < nOutput; i++) { s += result[i]; } printf("Check sum: %lf\n", s); #endif free(bnBias_myKernel); free(bnScale_myKernel); free(input); free(weight); }
65f0cf662d2b157c1994a747cc2450da361082e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* File name: nve_leapfrog.cu Date: 2009/04/03 13:49 Author: Aaron Thompson and Lukas Vlcek Copyright (C) 2009 Aaron Thompson and Lukas Vlcek This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License in a file called COPYING along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "nve_leapfrog.h" // Called for each particle i=0 to natom __global__ void leapfrog_nve( float4 *pos, float4 *vel, float4 *force ) { int i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if( i < natom ) { // LEAPFROG VERLET // float4 veli = vel[i]; float massif = __fdividef(dt, veli.w); //float massif = dt/veli.w; float4 f = force[i]; veli.x += f.x*massif; veli.y += f.y*massif; veli.z += f.z*massif; vel[i] = veli; float4 posi = pos[i]; posi.x += veli.x*dt; posi.x -= box.x *truncf(posi.x*2.0f*boxi.x ); posi.y += veli.y*dt; posi.y -= box.y *truncf(posi.y*2.0f*boxi.y); posi.z += veli.z*dt; posi.z -= box.z *truncf(posi.z*2.0f*boxi.z ); pos[i] = posi; } } /* end of nve_leapfrog.cu */
65f0cf662d2b157c1994a747cc2450da361082e9.cu
/* File name: nve_leapfrog.cu Date: 2009/04/03 13:49 Author: Aaron Thompson and Lukas Vlcek Copyright (C) 2009 Aaron Thompson and Lukas Vlcek This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License in a file called COPYING along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "nve_leapfrog.h" // Called for each particle i=0 to natom __global__ void leapfrog_nve( float4 *pos, float4 *vel, float4 *force ) { int i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if( i < natom ) { // LEAPFROG VERLET // float4 veli = vel[i]; float massif = __fdividef(dt, veli.w); //float massif = dt/veli.w; float4 f = force[i]; veli.x += f.x*massif; veli.y += f.y*massif; veli.z += f.z*massif; vel[i] = veli; float4 posi = pos[i]; posi.x += veli.x*dt; posi.x -= box.x *truncf(posi.x*2.0f*boxi.x ); posi.y += veli.y*dt; posi.y -= box.y *truncf(posi.y*2.0f*boxi.y); posi.z += veli.z*dt; posi.z -= box.z *truncf(posi.z*2.0f*boxi.z ); pos[i] = posi; } } /* end of nve_leapfrog.cu */
7aa0dc939296821f1143a4d72524780e3166a4f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <c10/core/GeneratorImpl.h> #include <algorithm> #include <hipcub/hipcub.hpp> #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" #include "caffe2/utils/math.h" namespace caffe2 { namespace { void inclusive_scan_wrapper( const int* length_data, int num_lengths, Tensor* temp_buffer, Tensor* prefix_sum_out, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), num_lengths, context_->cuda_stream()); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); // Run inclusive prefix sum hipcub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), num_lengths, context_->cuda_stream()); } template <typename SIndex> void sort_pairs_wrapper( int num_indices, int num_rows, Tensor* temp_buffer, const Tensor* linear_ind_buffer_, Tensor* sorted_linear_ind_buffer_, const Tensor* seg_id_buffer_, Tensor* sorted_seg_id_buffer_, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; hipcub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_ind_buffer_->template data<SIndex>(), sorted_linear_ind_buffer_->template mutable_data<SIndex>(), seg_id_buffer_->template data<int>(), sorted_seg_id_buffer_->template mutable_data<int>(), num_indices, 0, int(log2(float(num_rows)) + 1), context_->cuda_stream(), false); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); hipcub::DeviceRadixSort::SortPairs( d_temp_storage, temp_storage_bytes, linear_ind_buffer_->template data<SIndex>(), sorted_linear_ind_buffer_->template mutable_data<SIndex>(), seg_id_buffer_->template data<int>(), sorted_seg_id_buffer_->template mutable_data<int>(), num_indices, 0, int(log2(float(num_rows)) + 1), context_->cuda_stream(), false); } template <typename T> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void gradient_mean_kernel( const T* __restrict__ grad_in, const int* __restrict__ lengths, T* __restrict__ grad_out, int block_size) { int group = blockIdx.x; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { grad_out[group * block_size + i] = lengths[group] > 0 ? grad_in[group * block_size + i] / lengths[group] : grad_in[group * block_size + i]; } } template <typename SIndex, typename TParam, typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_adagrad_fused_length_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, TParam* param_mom, const SIndex* indices, const T* __restrict__ grad, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { const size_t gradIdx = group * block_size + threadIdx.x; // index for grad for (int line = start + threadIdx.y; line < end; line += blockDim.y) { // line: the idx in the indices // threadIdx.x: index in the embedding dimension const SIndex index = indices[line]; // the index-th row in the embedding table const size_t paramIdx = index * block_size + threadIdx.x; // index for param float gi = grad[gradIdx] + weight_decay * param[paramIdx]; float mom_new = gi * gi + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } } else { for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension const size_t gradIdx = group * block_size + i; // index for grad for (int line = start; line < end; ++line) { // line: the idx in the indices const SIndex index = indices[line]; // the index row in the embedding table const size_t paramIdx = index * block_size + i; // index for param float gi = grad[gradIdx] + weight_decay * param[paramIdx]; float mom_new = gi * gi + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } } } } template <typename SIndex, typename TParam, typename T, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_adagrad_fused_length_weighted_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, TParam* param_mom, const SIndex* indices, const T* __restrict__ grad, const T* __restrict__ weights, T* __restrict__ weights_grad_out, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // TODO: Tuning NumThreads for w_grad typedef hipcub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; // TODO(jianyuhuang): parallelize this outer loop for (int line = start; line < end; ++line) { T w_grad = 0; // line: the idx in the indices const SIndex index = indices[line]; // the index-th row in the embedding table // SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp also fuses // LengthsRangeFill + Gather operator. In the normal SLWS operator weight // is accessed via weights[line] but in most cases the weights are // generated by LengthsRangeFill and Gather operator. // For example, if lengths is [2, 3, 1] LengthsRangeFill will generate [0, // 1; 0, 1, 2; 0] and they are used as indices of Gather. // So if we fuse all of these, weights[line] just becomes // weights[line - start]. 
auto in_weight_temp = weights[line - start]; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension const size_t gradIdx = group * block_size + i; // index for in_grad const size_t paramIdx = index * block_size + i; // index for param // TODO: trying to reduce the variable number (common subexpression // elimination). auto in_grad_temp = grad[gradIdx]; w_grad += in_grad_temp * param[paramIdx]; auto out_grad_temp = in_weight_temp * in_grad_temp + weight_decay * param[paramIdx]; // TODO: split it into two kernels to make it more similar to exact // fusion kernel (not Approx on CPUs). float mom_new = out_grad_temp * out_grad_temp + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * out_grad_temp / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, hipcub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } // Construct a reverse map of offset_of_idx -> segment_id. template <typename SIndex> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void linear_index_weight_offsets_dedup_kernel( const SIndex* indices, const int* __restrict__ prefix_sum_length_data, // prefix of lengths int* __restrict__ seg_id_data // segment id ) { // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment for (int line = start + threadIdx.x; line < end; line += blockDim.x) { // line: the idx in the indices seg_id_data[line] = group; } } template < typename SIndex, typename TParam, typename T, bool ExactBlock = false, roundOption roundOpt = NEAREST> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments int num_indices, // number of indices const float epsilon, TParam* param, T* param_mom, const SIndex* indices, const T* __restrict__ grad, const SIndex* sorted_linear_ind_data, // sorted linear indices const int* __restrict__ sorted_seg_id_data, // sorted segment id const float* lr, ulong2 seed, float weight_decay = 0.f) { class randFactor<TParam, T, roundOpt> rand_factor( seed, blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x); const float LR = lr[0]; // num_indices blocks, each block process one index int sorted_linear_indice_id; if (ExactBlock) { sorted_linear_indice_id = blockIdx.x * blockDim.y + threadIdx.y; // the index of sorted_linear_ind } else { sorted_linear_indice_id = blockIdx.x; // the index of sorted_linear_ind } if (sorted_linear_indice_id >= num_indices) { // don't have warp divergence when embedding dim is multiple of 32 return; } // the index row in the embedding table SIndex index = sorted_linear_ind_data[sorted_linear_indice_id]; // check if this thread block is responsible for this whole linear index bool linear_index_start = (sorted_linear_indice_id == 0 || sorted_linear_ind_data[sorted_linear_indice_id - 1] != index); if (!linear_index_start) { // don't have warp divergence when embedding dim is 
multiple of 32 return; } if (ExactBlock) { // find the num of duplicated indices. int num_dup = 1; while (true) { int segment_continue = 0; if (sorted_linear_indice_id + num_dup + threadIdx.x < num_indices) { segment_continue = sorted_linear_ind_data[sorted_linear_indice_id + num_dup + threadIdx.x] == index; } #ifndef __HIP_PLATFORM_HCC__ int32_t num_dup_incr = __popc(__ballot_sync(0xFFFFFFFF, segment_continue)); #else int32_t num_dup_incr = __popc(__ballot(segment_continue)); #endif num_dup += num_dup_incr; if (num_dup_incr != kWarpSize) { break; } } float sum_squares = 0.0; extern __shared__ float x_ij[]; // we need to avoid index collision for the threads in the same block. // Different threadIdx.y works on different `index`. int sm_offset = threadIdx.y * block_size; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension float t_x_ij = 0.0; for (int dup_id = 0; dup_id < num_dup; dup_id++) { int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id]; t_x_ij += grad[group * block_size + i]; } t_x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]); sum_squares += t_x_ij * t_x_ij; x_ij[sm_offset + i] = t_x_ij; } // We have a strong assumption that blockDim.x = 32, which is equal to the warp size. float row_sum_squares_avg = warpReduceAllSum<float>(sum_squares) / static_cast<float>(block_size); float mom_new = param_mom[index] + row_sum_squares_avg; param_mom[index] = mom_new; // update param float step = LR / (sqrtf(mom_new) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t paramIdx = index * block_size + i; // index for param param[paramIdx] = rand_factor.convertTypeFromTargetToParam( rand_factor.convertTypeFromParamToTarget(param[paramIdx]) + x_ij[sm_offset + i] * step); } } else { // find the num of duplicated indices. 
int num_dup = 1; while (sorted_linear_indice_id + num_dup < num_indices && sorted_linear_ind_data[sorted_linear_indice_id + num_dup] == index) { num_dup += 1; } // TODO: Tuning NumThreads for sum_squares typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; int valid = min(block_size, blockDim.x); float sum_squares = 0.0; __shared__ float row_sum_squares_avg; extern __shared__ float x_ij[]; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension float t_x_ij = 0.0; for (int dup_id = 0; dup_id < num_dup; dup_id++) { int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id]; t_x_ij += grad[group * block_size + i]; } t_x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]); sum_squares += t_x_ij * t_x_ij; x_ij[i] = t_x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / static_cast<float>(block_size); float mom_new = param_mom[index] + row_sum_squares_avg; param_mom[index] = mom_new; } __syncthreads(); // update param float step = LR / (sqrtf(param_mom[index]) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t paramIdx = index * block_size + i; // index for param param[paramIdx] = rand_factor.convertTypeFromTargetToParam( rand_factor.convertTypeFromParamToTarget(param[paramIdx]) + x_ij[i] * step); } } } template <typename SIndex, typename TParam, typename T, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, T* param_mom, const SIndex* indices, const T* __restrict__ grad, const T* __restrict__ weights, T* __restrict__ weights_grad_out, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // TODO: Tuning NumThreads for w_grad typedef hipcub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int valid = min(block_size, blockDim.x); // for avg_square_weight. 
Can we reuse temp_storage __shared__ typename BlockReduce::TempStorage temp_storage2; // TODO(jianyuhuang): parallelize this outer loop for (int line = start; line < end; ++line) { T w_grad = 0; // i: index in the embedding dimension const SIndex index = indices[line]; auto in_weight_temp = weights[line - start]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const float x_ij = grad[group * block_size + i] + weight_decay * param[index * block_size + i]; sum_squares += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage2).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / static_cast<float>(block_size); param_mom[index] += static_cast<T>(row_sum_squares_avg * in_weight_temp * in_weight_temp); } __syncthreads(); // update param float step = LR / (sqrtf(param_mom[index]) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t gradIdx = group * block_size + i; // index for in_grad const size_t paramIdx = index * block_size + i; // index for param // TODO: trying to reduce the variable number (common subexpression // elimination). auto in_grad_temp = grad[gradIdx]; w_grad += in_grad_temp * param[paramIdx]; auto out_grad_temp = in_weight_temp * in_grad_temp + weight_decay * param[paramIdx]; // TODO: split it into two kernels to make it more similar to exact // fusion kernel (not Approx on CPUs). param[paramIdx] = out_grad_temp * step + param[paramIdx]; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, hipcub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } } // namespace template <typename T, typename TLengths, bool is_mean, class Context> class CUDASparseAdagradFusedWithSparseLengthsSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseAdagradFusedWithSparseLengthsSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDASparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size()); CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).numel(), Input(MOMENT_1).numel(), "Input Param size: ", Input(PARAM).numel(), " Input Moment size: ", Input(MOMENT_1).numel()); const int 
num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { hipLaunchKernelGGL(( gradient_mean_kernel<T>) , dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(), grad, lengths, grad_buffer_data, block_size); C10_HIP_KERNEL_LAUNCH_CHECK(); } if (block_size <= maxThreads) { int multiple = ::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS); dim3 block(block_size, multiple); // calling cuda kernel with ExactBlock = true hipLaunchKernelGGL(( sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // calling cuda kernel with ExactBlock = false hipLaunchKernelGGL(( sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false>), dim3(num_lengths), dim3(maxThreads), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, class Context> class CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size()); CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { // Allocate output to an empty tensor Output(AUX_GRAD, n, at::dtype<T>()); return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); auto& weightsInput = Input(AUX_PARAM); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).numel(), Input(MOMENT_1).numel(), "Input Param size: ", Input(PARAM).numel(), " Input Moment size: ", Input(MOMENT_1).numel()); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); auto* weightGradsOutput = Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>()); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); const T* weights = weightsInput.template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>(); int N = output_0dim; 
int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (block_size > 128) { hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 512>), dim3(num_lengths), dim3(512), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (block_size > 64) { hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 128>), dim3(num_lengths), dim3(128), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (block_size > 32) { hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 64>), dim3(num_lengths), dim3(64), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 32>), dim3(num_lengths), dim3(32), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD); }; template <typename T, typename TLengths, bool is_mean, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), round_option_((roundOption)this->template GetSingleArgument<int>( "round_option", NEAREST)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); 
CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == // outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; ulong2 seed; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { hipLaunchKernelGGL(( gradient_mean_kernel<T>) , dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(), grad, lengths, grad_buffer_data, block_size); C10_HIP_KERNEL_LAUNCH_CHECK(); } // 0: nearest rounding // 1: stochastic rounding if (round_option_ == STOCHASTIC) { seed.x = default_rng_seed_val; seed.y = maxThreads * block_size; } if (block_size <= maxThreads / 2 && block_size % 32 == 0) { // Fast path when the embedding dimension is a multiple of 32, using // WarpReduce. int multiple = ::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS); dim3 block(block_size, multiple); if (round_option_ == STOCHASTIC) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true, STOCHASTIC>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true, NEAREST>), dim3(num_lengths), dim3(block), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else { if (round_option_) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false, STOCHASTIC>) , dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false, NEAREST>) , dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; roundOption round_option_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, bool is_mean, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), round_option_((roundOption)this->template GetSingleArgument<int>( "round_option", NEAREST)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; CAFFE_ENFORCE( round_option_ == STOCHASTIC || round_option_ == NEAREST, "round_option_ should be either NEAREST or STOCHATIC"); const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); const int num_indices = indicesInput.dim(0); const int num_rows = Input(PARAM).dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); 
const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { hipLaunchKernelGGL(( gradient_mean_kernel<T>) , dim3(num_lengths), dim3(::min(maxThreads, block_size)), 0, context_.cuda_stream(), grad, lengths, grad_buffer_data, block_size); C10_HIP_KERNEL_LAUNCH_CHECK(); } sorted_linear_ind_buffer_.ResizeLike(indicesInput); seg_id_buffer_.ResizeLike(indicesInput); sorted_seg_id_buffer_.ResizeLike(indicesInput); hipLaunchKernelGGL(( linear_index_weight_offsets_dedup_kernel<IndexType>) , dim3(num_lengths), dim3(32), 0, context_.cuda_stream(), indices, prefix_sum_length_data, seg_id_buffer_.template mutable_data<int>()); C10_HIP_KERNEL_LAUNCH_CHECK(); sort_pairs_wrapper<IndexType>( num_indices, num_rows, &sort_buffer_, &indicesInput, &sorted_linear_ind_buffer_, &seg_id_buffer_, &sorted_seg_id_buffer_, &context_); ulong2 seed; // 0: nearest rounding // 1: stochastic rounding if (round_option_ == STOCHASTIC) { seed.x = default_rng_seed_val; seed.y = maxThreads * block_size; } if (block_size <= maxThreads / 2 && block_size % 32 == 0) { // Fast path when the embedding dimension is a multiple of 32, using // WarpReduce. constexpr int kWarpNum = 8; const dim3 threads(kWarpSize, kWarpNum); const dim3 blocks((num_indices + kWarpNum - 1) / kWarpNum); CAFFE_ENFORCE_LE( kWarpNum * kWarpSize, maxThreads, "the total number of threads in a block should be smaller than or equal to maxThreads"); const int sm_size = block_size * kWarpNum * sizeof(float); // Maximum shared memory allocated per thread block is 48 KB on Maxwell/Pascal CAFFE_ENFORCE_LE( sm_size, 1024 * 48, "Block size is too big and will exceed the max size of the shared memory"); if (round_option_ == STOCHASTIC) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, true, STOCHASTIC>) , dim3(blocks), dim3(threads), sm_size, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, true, NEAREST>) , dim3(blocks), dim3(threads), sm_size, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else { const int sm_size = block_size * sizeof(float); // Maximum shared memory allocated per thread block is 48 KB on Maxwell/Pascal CAFFE_ENFORCE_LE( sm_size, 1024 * 48, "Block size is too big and will exceed the max size of the shared memory"); if (round_option_ == STOCHASTIC) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, false, STOCHASTIC>) , dim3(num_indices), dim3(::min(maxThreads, block_size)), sm_size, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, false, NEAREST>) , dim3(num_indices), dim3(::min(maxThreads, block_size)), sm_size, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor sort_buffer_{CUDA}; Tensor sorted_linear_ind_buffer_{CUDA}; Tensor seg_id_buffer_{CUDA}; Tensor sorted_seg_id_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; roundOption round_option_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthWeightedSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { Output(AUX_GRAD, n, at::dtype<T>()); return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); auto& weightsInput = Input(AUX_PARAM); CAFFE_ENFORCE_EQ(1, 
lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == // outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); auto* weightGradsOutput = Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>()); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); const T* weights = weightsInput.template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (block_size > 128) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 512>), dim3(num_lengths), dim3(512), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (block_size > 64) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 128>), dim3(num_lengths), dim3(128), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (block_size > 32) { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 64>), dim3(num_lengths), dim3(64), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 32>), dim3(num_lengths), dim3(32), 0, context_.cuda_stream(), prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_HIP_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD); }; // For GPU, the implementation of the exact and approx (RowWise)SparseAdagrad // fusion are both 
approximate implementations. // When we don't have the duplicated indices, the outputs are the same as the // CPU implementation. REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsSumGradient, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsSumGradientApprox, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsMeanGradient, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsMeanGradientApprox, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsWeightedSumGradient, CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox, CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsSumGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); } // namespace caffe2 #undef SEGREDUCE_MINBLOCKS
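// Summary of the two row-wise code paths registered above:
//  * The "Exact" variants (CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp)
//    sort the sparse indices, build a segment-id map, and have each warp/block accumulate the
//    gradients of all duplicates of one index before issuing a single momentum/parameter update.
//  * The "Approx" variants launch one block per segment and update a row's momentum once per
//    occurrence of its index, so duplicated indices may diverge slightly from the CPU result,
//    as noted in the comment above.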
7aa0dc939296821f1143a4d72524780e3166a4f1.cu
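// Per-element update applied by the fused sparse AdaGrad kernels in this file
// (scalar sketch for reference; the helper name below is illustrative only):
//
//   void adagrad_element_update(float g_in, float& w, float& m,
//                               float lr, float eps, float wd) {
//     float g = g_in + wd * w;            // weight decay folded into the gradient
//     m += g * g;                         // accumulate squared gradient
//     w += lr * g / (sqrtf(m) + eps);     // step scaled by 1 / (sqrt(moment) + eps)
//   }
//
// The row-wise kernels keep a single moment value per embedding row and add the mean of
// the squared (weight-decayed) gradients over the row instead of a per-element value.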
#include <ATen/ATen.h> #include <c10/core/GeneratorImpl.h> #include <algorithm> #include <cub/device/device_radix_sort.cuh> #include "caffe2/sgd/adagrad_fused_op_gpu.cuh" #include "caffe2/utils/math.h" namespace caffe2 { namespace { void inclusive_scan_wrapper( const int* length_data, int num_lengths, Tensor* temp_buffer, Tensor* prefix_sum_out, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), num_lengths, context_->cuda_stream()); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); // Run inclusive prefix sum cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), num_lengths, context_->cuda_stream()); } template <typename SIndex> void sort_pairs_wrapper( int num_indices, int num_rows, Tensor* temp_buffer, const Tensor* linear_ind_buffer_, Tensor* sorted_linear_ind_buffer_, const Tensor* seg_id_buffer_, Tensor* sorted_seg_id_buffer_, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; cub::DeviceRadixSort::SortPairs( nullptr, temp_storage_bytes, linear_ind_buffer_->template data<SIndex>(), sorted_linear_ind_buffer_->template mutable_data<SIndex>(), seg_id_buffer_->template data<int>(), sorted_seg_id_buffer_->template mutable_data<int>(), num_indices, 0, int(log2(float(num_rows)) + 1), context_->cuda_stream(), false); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); cub::DeviceRadixSort::SortPairs( d_temp_storage, temp_storage_bytes, linear_ind_buffer_->template data<SIndex>(), sorted_linear_ind_buffer_->template mutable_data<SIndex>(), seg_id_buffer_->template data<int>(), sorted_seg_id_buffer_->template mutable_data<int>(), num_indices, 0, int(log2(float(num_rows)) + 1), context_->cuda_stream(), false); } template <typename T> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void gradient_mean_kernel( const T* __restrict__ grad_in, const int* __restrict__ lengths, T* __restrict__ grad_out, int block_size) { int group = blockIdx.x; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { grad_out[group * block_size + i] = lengths[group] > 0 ? grad_in[group * block_size + i] / lengths[group] : grad_in[group * block_size + i]; } } template <typename SIndex, typename TParam, typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_adagrad_fused_length_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, TParam* param_mom, const SIndex* indices, const T* __restrict__ grad, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { const size_t gradIdx = group * block_size + threadIdx.x; // index for grad for (int line = start + threadIdx.y; line < end; line += blockDim.y) { // line: the idx in the indices // threadIdx.x: index in the embedding dimension const SIndex index = indices[line]; // the index-th row in the embedding table const size_t paramIdx = index * block_size + threadIdx.x; // index for param float gi = grad[gradIdx] + weight_decay * param[paramIdx]; float mom_new = gi * gi + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } } else { for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension const size_t gradIdx = group * block_size + i; // index for grad for (int line = start; line < end; ++line) { // line: the idx in the indices const SIndex index = indices[line]; // the index row in the embedding table const size_t paramIdx = index * block_size + i; // index for param float gi = grad[gradIdx] + weight_decay * param[paramIdx]; float mom_new = gi * gi + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * gi / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } } } } template <typename SIndex, typename TParam, typename T, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_adagrad_fused_length_weighted_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, TParam* param_mom, const SIndex* indices, const T* __restrict__ grad, const T* __restrict__ weights, T* __restrict__ weights_grad_out, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // TODO: Tuning NumThreads for w_grad typedef cub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; // TODO(jianyuhuang): parallelize this outer loop for (int line = start; line < end; ++line) { T w_grad = 0; // line: the idx in the indices const SIndex index = indices[line]; // the index-th row in the embedding table // SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp also fuses // LengthsRangeFill + Gather operator. In the normal SLWS operator weight // is accessed via weights[line] but in most cases the weights are // generated by LengthsRangeFill and Gather operator. // For example, if lengths is [2, 3, 1] LengthsRangeFill will generate [0, // 1; 0, 1, 2; 0] and they are used as indices of Gather. // So if we fuse all of these, weights[line] just becomes // weights[line - start]. 
auto in_weight_temp = weights[line - start]; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension const size_t gradIdx = group * block_size + i; // index for in_grad const size_t paramIdx = index * block_size + i; // index for param // TODO: trying to reduce the variable number (common subexpression // elimination). auto in_grad_temp = grad[gradIdx]; w_grad += in_grad_temp * param[paramIdx]; auto out_grad_temp = in_weight_temp * in_grad_temp + weight_decay * param[paramIdx]; // TODO: split it into two kernels to make it more similar to exact // fusion kernel (not Approx on CPUs). float mom_new = out_grad_temp * out_grad_temp + param_mom[paramIdx]; param_mom[paramIdx] = mom_new; float param_new = LR * out_grad_temp / (sqrtf(mom_new) + epsilon) + param[paramIdx]; param[paramIdx] = param_new; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, cub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } // Construct a reverse map of offset_of_idx -> segment_id. template <typename SIndex> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void linear_index_weight_offsets_dedup_kernel( const SIndex* indices, const int* __restrict__ prefix_sum_length_data, // prefix of lengths int* __restrict__ seg_id_data // segment id ) { // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment for (int line = start + threadIdx.x; line < end; line += blockDim.x) { // line: the idx in the indices seg_id_data[line] = group; } } template < typename SIndex, typename TParam, typename T, bool ExactBlock = false, roundOption roundOpt = NEAREST> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments int num_indices, // number of indices const float epsilon, TParam* param, T* param_mom, const SIndex* indices, const T* __restrict__ grad, const SIndex* sorted_linear_ind_data, // sorted linear indices const int* __restrict__ sorted_seg_id_data, // sorted segment id const float* lr, ulong2 seed, float weight_decay = 0.f) { class randFactor<TParam, T, roundOpt> rand_factor( seed, blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x); const float LR = lr[0]; // num_indices blocks, each block process one index int sorted_linear_indice_id; if (ExactBlock) { sorted_linear_indice_id = blockIdx.x * blockDim.y + threadIdx.y; // the index of sorted_linear_ind } else { sorted_linear_indice_id = blockIdx.x; // the index of sorted_linear_ind } if (sorted_linear_indice_id >= num_indices) { // don't have warp divergence when embedding dim is multiple of 32 return; } // the index row in the embedding table SIndex index = sorted_linear_ind_data[sorted_linear_indice_id]; // check if this thread block is responsible for this whole linear index bool linear_index_start = (sorted_linear_indice_id == 0 || sorted_linear_ind_data[sorted_linear_indice_id - 1] != index); if (!linear_index_start) { // don't have warp divergence when embedding dim is multiple 
of 32 return; } if (ExactBlock) { // find the num of duplicated indices. int num_dup = 1; while (true) { int segment_continue = 0; if (sorted_linear_indice_id + num_dup + threadIdx.x < num_indices) { segment_continue = sorted_linear_ind_data[sorted_linear_indice_id + num_dup + threadIdx.x] == index; } #ifndef __HIP_PLATFORM_HCC__ int32_t num_dup_incr = __popc(__ballot_sync(0xFFFFFFFF, segment_continue)); #else int32_t num_dup_incr = __popc(__ballot(segment_continue)); #endif num_dup += num_dup_incr; if (num_dup_incr != kWarpSize) { break; } } float sum_squares = 0.0; extern __shared__ float x_ij[]; // we need to avoid index collision for the threads in the same block. // Different threadIdx.y works on different `index`. int sm_offset = threadIdx.y * block_size; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension float t_x_ij = 0.0; for (int dup_id = 0; dup_id < num_dup; dup_id++) { int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id]; t_x_ij += grad[group * block_size + i]; } t_x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]); sum_squares += t_x_ij * t_x_ij; x_ij[sm_offset + i] = t_x_ij; } // We have a strong assumption that blockDim.x = 32, which is equal to the warp size. float row_sum_squares_avg = warpReduceAllSum<float>(sum_squares) / static_cast<float>(block_size); float mom_new = param_mom[index] + row_sum_squares_avg; param_mom[index] = mom_new; // update param float step = LR / (sqrtf(mom_new) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t paramIdx = index * block_size + i; // index for param param[paramIdx] = rand_factor.convertTypeFromTargetToParam( rand_factor.convertTypeFromParamToTarget(param[paramIdx]) + x_ij[sm_offset + i] * step); } } else { // find the num of duplicated indices. 
int num_dup = 1; while (sorted_linear_indice_id + num_dup < num_indices && sorted_linear_ind_data[sorted_linear_indice_id + num_dup] == index) { num_dup += 1; } // TODO: Tuning NumThreads for sum_squares typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; int valid = min(block_size, blockDim.x); float sum_squares = 0.0; __shared__ float row_sum_squares_avg; extern __shared__ float x_ij[]; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { // i: index in the embedding dimension float t_x_ij = 0.0; for (int dup_id = 0; dup_id < num_dup; dup_id++) { int group = sorted_seg_id_data[sorted_linear_indice_id + dup_id]; t_x_ij += grad[group * block_size + i]; } t_x_ij += weight_decay * rand_factor.convertTypeFromParamToTarget(param[index * block_size + i]); sum_squares += t_x_ij * t_x_ij; x_ij[i] = t_x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / static_cast<float>(block_size); float mom_new = param_mom[index] + row_sum_squares_avg; param_mom[index] = mom_new; } __syncthreads(); // update param float step = LR / (sqrtf(param_mom[index]) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t paramIdx = index * block_size + i; // index for param param[paramIdx] = rand_factor.convertTypeFromTargetToParam( rand_factor.convertTypeFromParamToTarget(param[paramIdx]) + x_ij[i] * step); } } } template <typename SIndex, typename TParam, typename T, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel( const int* __restrict__ prefix_sum_length_data, // prefix of lengths // (offsets for the // segments) int N, // number of rows (hash size) of embedding table int block_size, // embedding dimension size int num_lengths, // number of segments const float epsilon, TParam* param, T* param_mom, const SIndex* indices, const T* __restrict__ grad, const T* __restrict__ weights, T* __restrict__ weights_grad_out, const float* lr, float weight_decay = 0.f) { const float LR = lr[0]; // num_lengths blocks, each block process one segment int group = blockIdx.x; // the group-th segment int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; // start offset of the segment int end = prefix_sum_length_data[group]; // end offset of the segment CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // TODO: Tuning NumThreads for w_grad typedef cub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int valid = min(block_size, blockDim.x); // for avg_square_weight. 
Can we reuse temp_storage __shared__ typename BlockReduce::TempStorage temp_storage2; // TODO(jianyuhuang): parallelize this outer loop for (int line = start; line < end; ++line) { T w_grad = 0; // i: index in the embedding dimension const SIndex index = indices[line]; auto in_weight_temp = weights[line - start]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const float x_ij = grad[group * block_size + i] + weight_decay * param[index * block_size + i]; sum_squares += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage2).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / static_cast<float>(block_size); param_mom[index] += static_cast<T>(row_sum_squares_avg * in_weight_temp * in_weight_temp); } __syncthreads(); // update param float step = LR / (sqrtf(param_mom[index]) + epsilon); for (int i = threadIdx.x; i < block_size; i += blockDim.x) { const size_t gradIdx = group * block_size + i; // index for in_grad const size_t paramIdx = index * block_size + i; // index for param // TODO: trying to reduce the variable number (common subexpression // elimination). auto in_grad_temp = grad[gradIdx]; w_grad += in_grad_temp * param[paramIdx]; auto out_grad_temp = in_weight_temp * in_grad_temp + weight_decay * param[paramIdx]; // TODO: split it into two kernels to make it more similar to exact // fusion kernel (not Approx on CPUs). param[paramIdx] = out_grad_temp * step + param[paramIdx]; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, cub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } } // namespace template <typename T, typename TLengths, bool is_mean, class Context> class CUDASparseAdagradFusedWithSparseLengthsSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseAdagradFusedWithSparseLengthsSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDASparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size()); CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).numel(), Input(MOMENT_1).numel(), "Input Param size: ", Input(PARAM).numel(), " Input Moment size: ", Input(MOMENT_1).numel()); const int 
num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { gradient_mean_kernel<T> <<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>( grad, lengths, grad_buffer_data, block_size); C10_CUDA_KERNEL_LAUNCH_CHECK(); } if (block_size <= maxThreads) { int multiple = std::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS); dim3 block(block_size, multiple); // calling cuda kernel with ExactBlock = true sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true><<<num_lengths, block, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // calling cuda kernel with ExactBlock = false sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false><<<num_lengths, maxThreads, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, class Context> class CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDASparseAdagradFusedWithSparseLengthWeightedSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(PARAM).size(), Input(MOMENT_1).size()); CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { // Allocate output to an empty tensor Output(AUX_GRAD, n, at::dtype<T>()); return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); auto& weightsInput = Input(AUX_PARAM); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).numel(), Input(MOMENT_1).numel(), "Input Param size: ", Input(PARAM).numel(), " Input Moment size: ", Input(MOMENT_1).numel()); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); auto* weightGradsOutput = Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>()); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); const T* weights = weightsInput.template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<TParam>(); int N = output_0dim; 
int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (block_size > 128) { sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 512><<<num_lengths, 512, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (block_size > 64) { sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 128><<<num_lengths, 128, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (block_size > 32) { sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 64><<<num_lengths, 64, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 32><<<num_lengths, 32, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD); }; template <typename T, typename TLengths, bool is_mean, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), round_option_((roundOption)this->template GetSingleArgument<int>( "round_option", NEAREST)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == // outputs(embedding/momentum) 
CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; ulong2 seed; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { gradient_mean_kernel<T> <<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>( grad, lengths, grad_buffer_data, block_size); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // 0: nearest rounding // 1: stochastic rounding if (round_option_ == STOCHASTIC) { seed.x = default_rng_seed_val; seed.y = maxThreads * block_size; } if (block_size <= maxThreads / 2 && block_size % 32 == 0) { // Fast path when the embedding dimension is a multiple of 32, using // WarpReduce. int multiple = std::min(maxThreads / block_size, SEGREDUCE_MINBLOCKS); dim3 block(block_size, multiple); if (round_option_ == STOCHASTIC) { rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true, STOCHASTIC><<<num_lengths, block, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, true, NEAREST><<<num_lengths, block, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else { if (round_option_) { rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false, STOCHASTIC> <<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { rowwise_sparse_adagrad_fused_length_sum_gradient_kernel< IndexType, TParam, T, false, NEAREST> <<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; roundOption round_option_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, bool is_mean, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), round_option_((roundOption)this->template GetSingleArgument<int>( "round_option", NEAREST)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthSumGradientOp" << " weight_decay_=" << weight_decay_; CAFFE_ENFORCE( round_option_ == STOCHASTIC || round_option_ == NEAREST, "round_option_ should be either NEAREST or STOCHATIC"); const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); const int num_indices = indicesInput.dim(0); const int num_rows = Input(PARAM).dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lengths = lengthsInput.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int 
block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (is_mean) { grad_buffer_.ResizeLike(segmentGradsInput); } auto* grad_buffer_data = is_mean ? grad_buffer_.template mutable_data<T>() : NULL; if (is_mean) { gradient_mean_kernel<T> <<<num_lengths, std::min(maxThreads, block_size), 0, context_.cuda_stream()>>>( grad, lengths, grad_buffer_data, block_size); C10_CUDA_KERNEL_LAUNCH_CHECK(); } sorted_linear_ind_buffer_.ResizeLike(indicesInput); seg_id_buffer_.ResizeLike(indicesInput); sorted_seg_id_buffer_.ResizeLike(indicesInput); linear_index_weight_offsets_dedup_kernel<IndexType> <<<num_lengths, 32, 0, context_.cuda_stream()>>>( indices, prefix_sum_length_data, seg_id_buffer_.template mutable_data<int>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); sort_pairs_wrapper<IndexType>( num_indices, num_rows, &sort_buffer_, &indicesInput, &sorted_linear_ind_buffer_, &seg_id_buffer_, &sorted_seg_id_buffer_, &context_); ulong2 seed; // 0: nearest rounding // 1: stochastic rounding if (round_option_ == STOCHASTIC) { seed.x = default_rng_seed_val; seed.y = maxThreads * block_size; } if (block_size <= maxThreads / 2 && block_size % 32 == 0) { // Fast path when the embedding dimension is a multiple of 32, using // WarpReduce. constexpr int kWarpNum = 8; const dim3 threads(kWarpSize, kWarpNum); const dim3 blocks((num_indices + kWarpNum - 1) / kWarpNum); CAFFE_ENFORCE_LE( kWarpNum * kWarpSize, maxThreads, "the total number of threads in a block should be smaller than or equal to maxThreads"); const int sm_size = block_size * kWarpNum * sizeof(float); // Maximum shared memory allocated per thread block is 48 KB on Maxwell/Pascal CAFFE_ENFORCE_LE( sm_size, 1024 * 48, "Block size is too big and will exceed the max size of the shared memory"); if (round_option_ == STOCHASTIC) { rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, true, STOCHASTIC> <<<blocks, threads, sm_size, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, true, NEAREST> <<<blocks, threads, sm_size, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else { const int sm_size = block_size * sizeof(float); // Maximum shared memory allocated per thread block is 48 KB on Maxwell/Pascal CAFFE_ENFORCE_LE( sm_size, 1024 * 48, "Block size is too big and will exceed the max size of the shared memory"); if (round_option_ == STOCHASTIC) { rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, false, STOCHASTIC> <<<num_indices, std::min(maxThreads, block_size), sm_size, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? 
grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { rowwise_sparse_adagrad_fused_length_sum_gradient_dedup_kernel< IndexType, TParam, T, false, NEAREST> <<<num_indices, std::min(maxThreads, block_size), sm_size, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, num_indices, epsilon_, paramOut, momentOut, indices, is_mean ? grad_buffer_data : grad, sorted_linear_ind_buffer_.template data<IndexType>(), sorted_seg_id_buffer_.template data<int>(), lr, seed, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; Tensor sort_buffer_{CUDA}; Tensor sorted_linear_ind_buffer_{CUDA}; Tensor seg_id_buffer_{CUDA}; Tensor sorted_seg_id_buffer_{CUDA}; Tensor grad_buffer_{CUDA}; protected: T epsilon_; roundOption round_option_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1); }; template <typename T, typename TLengths, class Context> class CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<Context>(operator_def, ws), epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)), weight_decay_( this->template GetSingleArgument<float>("weight_decay", 0.f)) { VLOG(1) << "gradient optimization operator in use: " << "CUDARowWiseSparseAdagradFusedWithSparseLengthWeightedSumGradientOp" << " weight_decay_=" << weight_decay_; const T decay = this->template GetSingleArgument<T>("decay", 1.0f); CAFFE_ENFORCE_EQ(decay, 1.0, "Decay is not supported for SparseAdagradOp"); } bool RunOnDevice() override { // Enforce shapes CAFFE_ENFORCE_EQ(Input(LR).size(), 1); CAFFE_ENFORCE_EQ( Input(PARAM).size_from_dim(1), Input(GRAD).size_from_dim(Input(INDICES).ndim())); return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto n = Input(INDICES).size(); if (n == 0) { Output(AUX_GRAD, n, at::dtype<T>()); return true; } return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(PARAM)); } template <typename IndexType, typename TParam> bool DoRunWithType2() { auto& segmentGradsInput = Input(GRAD); auto& lengthsInput = Input(LENGTHS); auto& indicesInput = Input(INDICES); auto& weightsInput = Input(AUX_PARAM); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_GT(Input(GRAD).dim(), 0); // Enforce: // number of rows: input(embedding/momentum) == // outputs(embedding/momentum) CAFFE_ENFORCE_EQ( Input(PARAM).dim(0), Input(MOMENT_1).dim(0), "Input Param number of rows: ", Input(PARAM).dim(0), " Input Moment size: ", Input(MOMENT_1).dim(0)); const int num_lengths = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(num_lengths == segmentGradsInput.dim(0)); int output_0dim = indicesInput.dim(0); auto* weightGradsOutput = Output(AUX_GRAD, indicesInput.sizes(), at::dtype<T>()); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (num_lengths <= 0) { // return early to avoid invalid empty kernel return 
true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), num_lengths, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const auto* lr = Input(LR).template data<T>(); const auto* indices = Input(INDICES).template data<IndexType>(); const T* grad = Input(GRAD).template data<T>(); const T* weights = weightsInput.template data<T>(); auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<TParam>(); auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<T>(); int N = output_0dim; int block_size = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (block_size > 128) { rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 512><<<num_lengths, 512, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (block_size > 64) { rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 128><<<num_lengths, 128, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (block_size > 32) { rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 64><<<num_lengths, 64, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { rowwise_sparse_adagrad_fused_length_weighted_sum_gradient_kernel< IndexType, TParam, T, 32><<<num_lengths, 32, 0, context_.cuda_stream()>>>( prefix_sum_length_data, N, block_size, num_lengths, epsilon_, paramOut, momentOut, indices, grad, weights, out_weight_grads, lr, weight_decay_); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } private: // member field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; protected: T epsilon_; T weight_decay_; INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS); OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD); }; // For GPU, the implementation of the exact and approx (RowWise)SparseAdagrad // fusion are both approximate implementations. // When we don't have the duplicated indices, the outputs are the same as the // CPU implementation. 
REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsSumGradient, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsSumGradientApprox, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsMeanGradient, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsMeanGradientApprox, CUDASparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsWeightedSumGradient, CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox, CUDASparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsSumGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsSumGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, false, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsMeanGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientExactOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsMeanGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsSumGradientOp< float, int, true, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradient, CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientApprox, CUDARowWiseSparseAdagradFusedWithSparseLengthsWeightedSumGradientOp< float, int, CUDAContext>); } // namespace caffe2 #undef SEGREDUCE_MINBLOCKS
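The operators registered above all wrap variants of the same fused row-wise AdaGrad update. As a rough, self-contained illustration of the math those kernels implement (one thread block per indexed row, a block-level reduction of sum(g^2) feeding a single per-row moment, then a scaled element-wise update), the following standalone kernel is a minimal sketch only: it assumes deduplicated indices, one gradient row per index, a scalar learning rate, and the textbook sign convention; the names (rowwise_adagrad_row, kBlock) are invented for the example and are not taken from the operator.

#include <cuda_runtime.h>
#include <math.h>

template <int kBlock>
__global__ void rowwise_adagrad_row(
    float* param, float* moment, const float* grad, const long* indices,
    int block_size, float lr, float epsilon) {
  __shared__ float ssum[kBlock];
  __shared__ float smoment;
  const long row = indices[blockIdx.x];
  const float* g = grad + (size_t)blockIdx.x * block_size;
  float* p = param + (size_t)row * block_size;

  // Block-wide reduction of sum(g^2) over the embedding dimension.
  float local = 0.f;
  for (int j = threadIdx.x; j < block_size; j += kBlock) local += g[j] * g[j];
  ssum[threadIdx.x] = local;
  __syncthreads();
  for (int s = kBlock / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) ssum[threadIdx.x] += ssum[threadIdx.x + s];
    __syncthreads();
  }

  // One scalar moment per row (the "row-wise" part), broadcast through shared memory.
  if (threadIdx.x == 0) {
    float m = moment[row] + ssum[0] / block_size;
    moment[row] = m;
    smoment = m;
  }
  __syncthreads();
  const float scale = lr / (sqrtf(smoment) + epsilon);
  for (int j = threadIdx.x; j < block_size; j += kBlock) p[j] -= scale * g[j];
}

// Example launch (blockDim must equal kBlock):
//   rowwise_adagrad_row<256><<<num_indices, 256>>>(param, moment, grad, indices,
//                                                  block_size, lr, epsilon);
// Duplicated indices would race here; the operators above avoid that with atomics or by
// sorting/deduplicating the indices first.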
0f33d662bf084b73182b974197b1f7066ed8b856.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "./kern.cuh" #include "megdnn/dtype.h" #include "src/common/elemwise_multi_type/kern_defs.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; namespace { template <typename T> struct __builtin_align__(sizeof(T) * 4) Packed4 { T v[4]; }; template <typename stype, typename dtype> __global__ void kern_1d( const stype* x, const float* k, const float* b, dtype* y, uint32_t n) { elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; uint32_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { y[i] = op(x[i], k[i], b[i]); } } template <typename stype, typename dtype> void invoke_kern_1d( const stype* x, const float* k, const float* b, dtype* y, uint32_t n, hipStream_t stream) { dim3 threads = NR_THREADS; dim3 blocks = DIVUP(n, NR_THREADS); hipLaunchKernelGGL(( kern_1d<stype, dtype>), dim3(blocks), dim3(threads), 0, stream, x, k, b, y, n); after_kernel_launch(); } template <typename stype, typename dtype> __global__ void kern_2d_fallback( const stype* x, const float* k, const float* b, dtype* y, uint32_t m, uint32_t n) { uint32_t i = threadIdx.y + blockIdx.y * blockDim.y; uint32_t j = threadIdx.x + blockIdx.x * blockDim.x; elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; if (i < m && j < n) { y[i * n + j] = op(x[i * n + j], k[j], b[j]); } } template <typename stype, typename dtype> __global__ void kern_2d_mul4( const stype* __restrict x, const float* __restrict k, const float* __restrict b, dtype* y_, uint32_t m, uint32_t n) { uint32_t i = threadIdx.y + blockIdx.y * blockDim.y; uint32_t j = threadIdx.x + blockIdx.x * blockDim.x; elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; Packed4<dtype>* __restrict__ y = (Packed4<dtype>*)y_; if (i < m && j < n) { stype x0 = x[(i * n + j) * 4 + 0]; stype x1 = x[(i * n + j) * 4 + 1]; stype x2 = x[(i * n + j) * 4 + 2]; stype x3 = x[(i * n + j) * 4 + 3]; float k0 = k[j * 4 + 0]; float k1 = k[j * 4 + 1]; float k2 = k[j * 4 + 2]; float k3 = k[j * 4 + 3]; float b0 = b[j * 4 + 0]; float b1 = b[j * 4 + 1]; float b2 = b[j * 4 + 2]; float b3 = b[j * 4 + 3]; Packed4<dtype> pack; pack.v[0] = op(x0, k0, b0); pack.v[1] = op(x1, k1, b1); pack.v[2] = op(x2, k2, b2); pack.v[3] = op(x3, k3, b3); y[i * n + j] = pack; } } template <typename stype, typename dtype> void invoke_kern_2d( const stype* x, const float* k, const float* b, dtype* y, uint32_t m, uint32_t n, hipStream_t stream) { if (n % 4 == 0 && is_same<dtype, int8_t>::value) { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(n / 4, NR_THREADS_X), DIVUP(m, NR_THREADS_Y)); // each thread process 4 elems // template to avoid compile error hipLaunchKernelGGL(( kern_2d_mul4<stype, dtype>) , dim3(blocks), dim3(threads), 0, stream, x, k, b, y, m, n / 4); } else { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(n, NR_THREADS_X), DIVUP(m, NR_THREADS_Y)); hipLaunchKernelGGL(( kern_2d_fallback<stype, dtype>) , dim3(blocks), dim3(threads), 0, stream, x, k, b, y, m, n); after_kernel_launch(); } } } // anonymous namespace using namespace megdnn; template <typename stype> void cuda::elemwise_multi_type::fma3_iXxf32xf32xi8_bcast_1x( const stype* x, const float* k, const float* b, dt_int8* y, uint32_t m, uint32_t n, hipStream_t stream) { if (m == 1) { invoke_kern_1d(x, k, b, y, n, stream); } else { invoke_kern_2d(x, k, b, y, m, n, stream); } } #define INST(stype) \ template void cuda::elemwise_multi_type::fma3_iXxf32xf32xi8_bcast_1x<stype>( \ const stype*, const float*, const float*, 
dt_int8*, uint32_t, uint32_t, \ hipStream_t) #define cb(t) INST(DTypeTrait<t>::ctype); MEGDNN_FOREACH_COMPUTING_DTYPE_INT(cb) #undef cb #undef INST
0f33d662bf084b73182b974197b1f7066ed8b856.cu
#include "./kern.cuh" #include "megdnn/dtype.h" #include "src/common/elemwise_multi_type/kern_defs.cuh" #include "src/cuda/utils.cuh" using namespace megdnn; namespace { template <typename T> struct __builtin_align__(sizeof(T) * 4) Packed4 { T v[4]; }; template <typename stype, typename dtype> __global__ void kern_1d( const stype* x, const float* k, const float* b, dtype* y, uint32_t n) { elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; uint32_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n) { y[i] = op(x[i], k[i], b[i]); } } template <typename stype, typename dtype> void invoke_kern_1d( const stype* x, const float* k, const float* b, dtype* y, uint32_t n, cudaStream_t stream) { dim3 threads = NR_THREADS; dim3 blocks = DIVUP(n, NR_THREADS); kern_1d<stype, dtype><<<blocks, threads, 0, stream>>>(x, k, b, y, n); after_kernel_launch(); } template <typename stype, typename dtype> __global__ void kern_2d_fallback( const stype* x, const float* k, const float* b, dtype* y, uint32_t m, uint32_t n) { uint32_t i = threadIdx.y + blockIdx.y * blockDim.y; uint32_t j = threadIdx.x + blockIdx.x * blockDim.x; elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; if (i < m && j < n) { y[i * n + j] = op(x[i * n + j], k[j], b[j]); } } template <typename stype, typename dtype> __global__ void kern_2d_mul4( const stype* __restrict x, const float* __restrict k, const float* __restrict b, dtype* y_, uint32_t m, uint32_t n) { uint32_t i = threadIdx.y + blockIdx.y * blockDim.y; uint32_t j = threadIdx.x + blockIdx.x * blockDim.x; elemwise_multi_type::Fma3iXxf32xf32xiYOp<stype, dtype> op; Packed4<dtype>* __restrict__ y = (Packed4<dtype>*)y_; if (i < m && j < n) { stype x0 = x[(i * n + j) * 4 + 0]; stype x1 = x[(i * n + j) * 4 + 1]; stype x2 = x[(i * n + j) * 4 + 2]; stype x3 = x[(i * n + j) * 4 + 3]; float k0 = k[j * 4 + 0]; float k1 = k[j * 4 + 1]; float k2 = k[j * 4 + 2]; float k3 = k[j * 4 + 3]; float b0 = b[j * 4 + 0]; float b1 = b[j * 4 + 1]; float b2 = b[j * 4 + 2]; float b3 = b[j * 4 + 3]; Packed4<dtype> pack; pack.v[0] = op(x0, k0, b0); pack.v[1] = op(x1, k1, b1); pack.v[2] = op(x2, k2, b2); pack.v[3] = op(x3, k3, b3); y[i * n + j] = pack; } } template <typename stype, typename dtype> void invoke_kern_2d( const stype* x, const float* k, const float* b, dtype* y, uint32_t m, uint32_t n, cudaStream_t stream) { if (n % 4 == 0 && is_same<dtype, int8_t>::value) { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(n / 4, NR_THREADS_X), DIVUP(m, NR_THREADS_Y)); // each thread process 4 elems // template to avoid compile error kern_2d_mul4<stype, dtype> <<<blocks, threads, 0, stream>>>(x, k, b, y, m, n / 4); } else { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(n, NR_THREADS_X), DIVUP(m, NR_THREADS_Y)); kern_2d_fallback<stype, dtype> <<<blocks, threads, 0, stream>>>(x, k, b, y, m, n); after_kernel_launch(); } } } // anonymous namespace using namespace megdnn; template <typename stype> void cuda::elemwise_multi_type::fma3_iXxf32xf32xi8_bcast_1x( const stype* x, const float* k, const float* b, dt_int8* y, uint32_t m, uint32_t n, cudaStream_t stream) { if (m == 1) { invoke_kern_1d(x, k, b, y, n, stream); } else { invoke_kern_2d(x, k, b, y, m, n, stream); } } #define INST(stype) \ template void cuda::elemwise_multi_type::fma3_iXxf32xf32xi8_bcast_1x<stype>( \ const stype*, const float*, const float*, dt_int8*, uint32_t, uint32_t, \ cudaStream_t) #define cb(t) INST(DTypeTrait<t>::ctype); MEGDNN_FOREACH_COMPUTING_DTYPE_INT(cb) #undef cb #undef INST
ab088e28ed5d9a1f431455937e583ec3ca3eaa13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include"struct.h" #include "init_cuda.h" #include "cuda_scft.h" #include <errno.h> #include <typeinfo> #include"cuda_aid.cuh" extern void average_value(std::vector<double*> data,GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern void getConc(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern double Free_um(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern void scft_main_loop(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ hipEvent_t start,stop; float msec; hipError_t error; error=hipEventCreate(&start); error=hipEventCreate(&stop); error=hipEventCreate(&start); error=hipEventCreate(&stop); error=hipEventRecord(start,0); double energy=Free_um(gpu_info,cufft_info); error=hipEventRecord(stop,0); hipEventSynchronize(stop); error=hipEventElapsedTime(&msec,start,stop); if(error!=hipSuccess) printf("fft_test did not successfully detect run time\n"); printf("time=%0.10f\n",msec); } extern double Free_um(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int iter=0; double *freeEnergy=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeOld=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeW=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeAB=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeS=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeDiff=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeWsurf=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *inCompMax=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *fpsum=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *psum=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double MaxInCompMax=10.0; do{ MaxInCompMax=10.0; iter=iter+1; average_value(cufft_info->wa_cu,gpu_info,cufft_info); average_value(cufft_info->wb_cu,gpu_info,cufft_info); getConc(gpu_info,cufft_info); dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); for(int i=0;i<cufft_info->NxNyNz;i++){ //int batch=2; //printf("%g %g %g\n",cufft_info->Pha_cu[gpu_index][i+cufft_info->NxNyNz*batch],cufft_info->wa_cu[gpu_index][i+cufft_info->NxNyNz*batch],cufft_info->wb_cu[gpu_index][i+cufft_info->NxNyNz*batch]); } //printf("%g %g\n",cufft_info->hAB,cufft_info->lambda); hipLaunchKernelGGL(( phi_w_constrained), dim3(grid),dim3(cufft_info->batch),0,gpu_info->stream[gpu_index], cufft_info->wa_cu[gpu_index],cufft_info->wb_cu[gpu_index],cufft_info->pha_cu[gpu_index],cufft_info->phb_cu[gpu_index], cufft_info->Pha_cu[gpu_index],cufft_info->hAB,cufft_info->lambda,cufft_info->wopt); checkCudaErrors(hipDeviceSynchronize()); } if(iter%cufft_info->AverIt==0){ MaxInCompMax=0.0; for(int i=0;i<cufft_info->batch*gpu_info->GPU_N;i++){ freeW[i]=0.0; freeAB[i]=0.0; freeS[i]=0.0; freeWsurf[i]=0.0; inCompMax[i]=0.0; } for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++) for(int i=0;i<cufft_info->batch;i++){ char fname[200]; sprintf(fname,"./Density/pha_%d.dat",i+1); FILE *dp=fopen(fname,"w"); fprintf(dp,"Nx=%d, Ny=%d, Nz=%d\n",cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); fprintf(dp,"dx=%g, dy=%g, dz=%g\n",cufft_info->dx,cufft_info->dy,cufft_info->dz); for(long ijk=0;ijk<cufft_info->NxNyNz;ijk++){ 
psum[i+gpu_index*cufft_info->batch]=1-cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]-cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]; fpsum[i+gpu_index*cufft_info->batch]=fabs(psum[i+gpu_index*cufft_info->batch]); if(fpsum[i+gpu_index*cufft_info->batch]>inCompMax[i+gpu_index*cufft_info->batch]) inCompMax[i+gpu_index*cufft_info->batch]=fpsum[i+gpu_index*cufft_info->batch]; freeAB[i+gpu_index*cufft_info->batch]=freeAB[i+gpu_index*cufft_info->batch]+cufft_info->hAB*cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]; freeW[i+gpu_index*cufft_info->batch]=freeW[i+gpu_index*cufft_info->batch]-(cufft_info->wa_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]+cufft_info->wb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]); fprintf(dp,"%8.8e %8.8e %8.8e %8.8e\n",cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->wa_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->wb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]); }// end for ijk fclose(dp); freeAB[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; //printf("freeW=%0.10f\n",freeW); freeW[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; freeWsurf[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; freeS[i+gpu_index*cufft_info->batch]=-log(cufft_info->ql[gpu_index][i]); //printf("%d %.10f %.10f %.10f %.10f\n",i,qCab[0],qCab[1],freeS[i],-log(qCab[1])); freeOld[i+gpu_index*cufft_info->batch]=freeEnergy[i+gpu_index*cufft_info->batch]; freeEnergy[i+gpu_index*cufft_info->batch]=freeAB[i+gpu_index*cufft_info->batch]+freeW[i+gpu_index*cufft_info->batch]+freeS[i+gpu_index*cufft_info->batch]; printf("GPU %d batch %d: %5d : %.8e, %.8e, %.8e,%.8e, %.8e\n", gpu_index,i,iter, freeEnergy[i+gpu_index*cufft_info->batch],freeAB[i+gpu_index*cufft_info->batch],freeW[i+gpu_index*cufft_info->batch], freeS[i+gpu_index*cufft_info->batch],inCompMax[i+gpu_index*cufft_info->batch]); //printf("%g %g\n",inCompMax[0],inCompMax[i+gpu_index*cufft_info->batch]); if(inCompMax[i+gpu_index*cufft_info->batch]>MaxInCompMax) MaxInCompMax=inCompMax[i+gpu_index*cufft_info->batch]; }// end for i }//end if cufft_info->AverIt==0 //printf("%g %g\n",MaxInCompMax,cufft_info->ErrorinCompMax); }while(iter<cufft_info->MaxIT&&MaxInCompMax>cufft_info->ErrorinCompMax);//! end loop do free(freeEnergy); free(freeOld); free(freeW); free(freeAB); free(freeS); free(freeDiff); free(freeWsurf); free(inCompMax); free(psum); free(fpsum); return 0; } extern void getConc(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int gpu_index; dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); int threads=1024; size_t smemSize = threads * sizeof(double)*2;//(threads <= 32) ? 
2 * threads * sizeof(double) : average_value(cufft_info->wa_cu,gpu_info,cufft_info); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( qInt_init), dim3(grid),dim3(cufft_info->batch),0,gpu_info->stream[gpu_index], cufft_info->qInt_cu[gpu_index]); checkCudaErrors(hipDeviceSynchronize()); } sovDifFft(gpu_info,cufft_info,cufft_info->qa_cu,cufft_info->wa_cu,cufft_info->NsA,1); sovDifFft(gpu_info,cufft_info,cufft_info->qcb_cu,cufft_info->wb_cu,cufft_info->NsB,-1); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( qa_to_qInt), dim3(grid),dim3(cufft_info->batch),0,gpu_info->stream[gpu_index], cufft_info->qInt_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->NsA); } sovDifFft(gpu_info,cufft_info,cufft_info->qb_cu,cufft_info->wb_cu,cufft_info->NsB,1); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( qa_to_qInt2), dim3(grid),dim3(cufft_info->batch),0,gpu_info->stream[gpu_index], cufft_info->qInt_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsB); checkCudaErrors(hipDeviceSynchronize()); } sovDifFft(gpu_info,cufft_info,cufft_info->qca_cu,cufft_info->wa_cu,cufft_info->NsA,-1); //for(int i=0;i<20;i++) printf("%g\n",cufft_info->qca_cu[0][i*cufft_info->NsA]); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( cal_ql), dim3(cufft_info->batch),dim3(threads),smemSize,gpu_info->stream[gpu_index], cufft_info->ql[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->NsB,cufft_info->NxNyNz); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); for(int i=0;i<cufft_info->batch;i++){ cufft_info->ql[gpu_index][i]/=cufft_info->NxNyNz; cufft_info->ffl[gpu_index][i]=cufft_info->ds0/cufft_info->ql[gpu_index][i]; } //w_to_phi<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->pha_cu[gpu_index], cufft_info->phb_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->qca_cu[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsA,cufft_info->NsB,cufft_info->ffl[gpu_index]); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); hipLaunchKernelGGL(( w_to_phi_go), dim3(gridgo),dim3(gpu_info->thread),0,gpu_info->stream[gpu_index], cufft_info->pha_cu[gpu_index], cufft_info->phb_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->qca_cu[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsA,cufft_info->NsB,cufft_info->ffl[gpu_index]); checkCudaErrors(hipDeviceSynchronize()); //printf("cal=%g\n",cufft_info->ql[gpu_index][0]); } checkCudaErrors(hipGetLastError()); } extern void sovDifFft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info,std::vector<double*> g,std::vector<double*> w,int ns,int sign){ int ns1=ns+1; int Nx=cufft_info->Nx; int Ny=cufft_info->Ny; int Nz=cufft_info->Nz; int gpu_index; int iz; dim3 grid(Nx,Ny,Nz),block(cufft_info->batch,1,1),grid1(cufft_info->Nxh1,Ny,Nz); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( initilize_wdz), dim3(grid),dim3(block),0,gpu_info->stream[gpu_index], w[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->ds2); } if(sign==1){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ 
checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( initilize_q), dim3(grid),dim3(block),1,gpu_info->stream[gpu_index], g[gpu_index],cufft_info->qInt_cu[gpu_index],ns1);//,gpu_info->stream[gpu_index] } for(iz=1;iz<=ns;iz++){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); hipLaunchKernelGGL(( initilize_in_go), dim3(gridgo),dim3(gpu_info->thread),0,gpu_info->stream[gpu_index], cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz); //initilize_in<<<grid,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz); checkCudaErrors(hipfftExecD2Z(cufft_info->plan_forward[gpu_index],cufft_info->device_in[gpu_index],cufft_info->device_out[gpu_index])); //sufaceField<<<grid1,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nx); dim3 gridgo_sur(cufft_info->Nxh1NyNz/gpu_info->thread_sur,cufft_info->batch); hipLaunchKernelGGL(( sufaceField_go), dim3(gridgo_sur),dim3(gpu_info->thread_sur),0,gpu_info->stream[gpu_index], cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nxh1,cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); checkCudaErrors(hipfftExecZ2D(cufft_info->plan_backward[gpu_index],cufft_info->device_out[gpu_index],cufft_info->device_in[gpu_index])); hipLaunchKernelGGL(( in_to_g_go), dim3(gridgo),dim3(gpu_info->thread),0,gpu_info->stream[gpu_index], g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); //in_to_g<<<grid,block,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); } } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(hipStreamSynchronize(gpu_info->stream[gpu_index])); checkCudaErrors(hipDeviceSynchronize()); } } else if(sign==-1){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( initilize_q_inverse), dim3(grid),dim3(block),0,gpu_info->stream[gpu_index], g[gpu_index],cufft_info->qInt_cu[gpu_index],ns1);//,gpu_info->stream[gpu_index] } for(iz=ns-1;iz>=0;iz--){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); //initilize_in<<<grid,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz+2); hipLaunchKernelGGL(( initilize_in_go), dim3(gridgo),dim3(gpu_info->thread),0,gpu_info->stream[gpu_index], cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz+2); checkCudaErrors(hipfftExecD2Z(cufft_info->plan_forward[gpu_index],cufft_info->device_in[gpu_index],cufft_info->device_out[gpu_index])); //sufaceField<<<grid1,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nx); dim3 gridgo_sur(cufft_info->Nxh1NyNz/gpu_info->thread_sur,cufft_info->batch); hipLaunchKernelGGL(( sufaceField_go), dim3(gridgo_sur),dim3(gpu_info->thread_sur),0,gpu_info->stream[gpu_index], 
cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nxh1,cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); checkCudaErrors(hipfftExecZ2D(cufft_info->plan_backward[gpu_index],cufft_info->device_out[gpu_index],cufft_info->device_in[gpu_index])); //in_to_g<<<grid,block,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); hipLaunchKernelGGL(( in_to_g_go), dim3(gridgo),dim3(gpu_info->thread),0,gpu_info->stream[gpu_index], g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipGetLastError()); } } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(hipStreamSynchronize(gpu_info->stream[gpu_index])); checkCudaErrors(hipDeviceSynchronize()); } } } extern void average_value(std::vector<double*> data,GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int gpu_index; int threads=gpu_info->thread; size_t smemSize = (threads <= 32) ? 2 * threads * sizeof(double) : threads * sizeof(double); std::vector<double*> sum; sum.resize(gpu_info->GPU_N); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(hipMallocManaged((void**)&(sum[gpu_index]), sizeof(double)* cufft_info->batch)); hipLaunchKernelGGL(( reduce3<double>), dim3(cufft_info->batch), dim3(threads), smemSize,gpu_info->stream[gpu_index] , data[gpu_index], sum[gpu_index], cufft_info->NxNyNz); } dim3 block(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); hipLaunchKernelGGL(( minus_average), dim3(block),dim3(cufft_info->batch),0,gpu_info->stream[gpu_index], data[gpu_index],sum[gpu_index]); } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(hipSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(hipFree(sum[gpu_index])); checkCudaErrors(hipDeviceSynchronize()); } }
ab088e28ed5d9a1f431455937e583ec3ca3eaa13.cu
#include"struct.h" #include "init_cuda.h" #include "cuda_scft.h" #include <errno.h> #include <typeinfo> #include"cuda_aid.cuh" extern void average_value(std::vector<double*> data,GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern void getConc(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern double Free_um(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info); extern void scft_main_loop(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ cudaEvent_t start,stop; float msec; cudaError_t error; error=cudaEventCreate(&start); error=cudaEventCreate(&stop); error=cudaEventCreate(&start); error=cudaEventCreate(&stop); error=cudaEventRecord(start,0); double energy=Free_um(gpu_info,cufft_info); error=cudaEventRecord(stop,0); cudaEventSynchronize(stop); error=cudaEventElapsedTime(&msec,start,stop); if(error!=cudaSuccess) printf("fft_test did not successfully detect run time\n"); printf("time=%0.10f\n",msec); } extern double Free_um(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int iter=0; double *freeEnergy=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeOld=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeW=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeAB=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeS=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeDiff=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *freeWsurf=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *inCompMax=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *fpsum=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double *psum=(double*)malloc(sizeof(double)*cufft_info->batch*gpu_info->GPU_N); double MaxInCompMax=10.0; do{ MaxInCompMax=10.0; iter=iter+1; average_value(cufft_info->wa_cu,gpu_info,cufft_info); average_value(cufft_info->wb_cu,gpu_info,cufft_info); getConc(gpu_info,cufft_info); dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); for(int i=0;i<cufft_info->NxNyNz;i++){ //int batch=2; //printf("%g %g %g\n",cufft_info->Pha_cu[gpu_index][i+cufft_info->NxNyNz*batch],cufft_info->wa_cu[gpu_index][i+cufft_info->NxNyNz*batch],cufft_info->wb_cu[gpu_index][i+cufft_info->NxNyNz*batch]); } //printf("%g %g\n",cufft_info->hAB,cufft_info->lambda); phi_w_constrained<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->wa_cu[gpu_index],cufft_info->wb_cu[gpu_index],cufft_info->pha_cu[gpu_index],cufft_info->phb_cu[gpu_index], cufft_info->Pha_cu[gpu_index],cufft_info->hAB,cufft_info->lambda,cufft_info->wopt); checkCudaErrors(cudaDeviceSynchronize()); } if(iter%cufft_info->AverIt==0){ MaxInCompMax=0.0; for(int i=0;i<cufft_info->batch*gpu_info->GPU_N;i++){ freeW[i]=0.0; freeAB[i]=0.0; freeS[i]=0.0; freeWsurf[i]=0.0; inCompMax[i]=0.0; } for(int gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++) for(int i=0;i<cufft_info->batch;i++){ char fname[200]; sprintf(fname,"./Density/pha_%d.dat",i+1); FILE *dp=fopen(fname,"w"); fprintf(dp,"Nx=%d, Ny=%d, Nz=%d\n",cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); fprintf(dp,"dx=%g, dy=%g, dz=%g\n",cufft_info->dx,cufft_info->dy,cufft_info->dz); for(long ijk=0;ijk<cufft_info->NxNyNz;ijk++){ psum[i+gpu_index*cufft_info->batch]=1-cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]-cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]; 
fpsum[i+gpu_index*cufft_info->batch]=fabs(psum[i+gpu_index*cufft_info->batch]); if(fpsum[i+gpu_index*cufft_info->batch]>inCompMax[i+gpu_index*cufft_info->batch]) inCompMax[i+gpu_index*cufft_info->batch]=fpsum[i+gpu_index*cufft_info->batch]; freeAB[i+gpu_index*cufft_info->batch]=freeAB[i+gpu_index*cufft_info->batch]+cufft_info->hAB*cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]; freeW[i+gpu_index*cufft_info->batch]=freeW[i+gpu_index*cufft_info->batch]-(cufft_info->wa_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz]+cufft_info->wb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]*cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]); fprintf(dp,"%8.8e %8.8e %8.8e %8.8e\n",cufft_info->pha_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->phb_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->wa_cu[gpu_index][ijk+i*cufft_info->NxNyNz],cufft_info->wb_cu[gpu_index][ijk+i*cufft_info->NxNyNz]); }// end for ijk fclose(dp); freeAB[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; //printf("freeW=%0.10f\n",freeW); freeW[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; freeWsurf[i+gpu_index*cufft_info->batch]/=cufft_info->NxNyNz; freeS[i+gpu_index*cufft_info->batch]=-log(cufft_info->ql[gpu_index][i]); //printf("%d %.10f %.10f %.10f %.10f\n",i,qCab[0],qCab[1],freeS[i],-log(qCab[1])); freeOld[i+gpu_index*cufft_info->batch]=freeEnergy[i+gpu_index*cufft_info->batch]; freeEnergy[i+gpu_index*cufft_info->batch]=freeAB[i+gpu_index*cufft_info->batch]+freeW[i+gpu_index*cufft_info->batch]+freeS[i+gpu_index*cufft_info->batch]; printf("GPU %d batch %d: %5d : %.8e, %.8e, %.8e,%.8e, %.8e\n", gpu_index,i,iter, freeEnergy[i+gpu_index*cufft_info->batch],freeAB[i+gpu_index*cufft_info->batch],freeW[i+gpu_index*cufft_info->batch], freeS[i+gpu_index*cufft_info->batch],inCompMax[i+gpu_index*cufft_info->batch]); //printf("%g %g\n",inCompMax[0],inCompMax[i+gpu_index*cufft_info->batch]); if(inCompMax[i+gpu_index*cufft_info->batch]>MaxInCompMax) MaxInCompMax=inCompMax[i+gpu_index*cufft_info->batch]; }// end for i }//end if cufft_info->AverIt==0 //printf("%g %g\n",MaxInCompMax,cufft_info->ErrorinCompMax); }while(iter<cufft_info->MaxIT&&MaxInCompMax>cufft_info->ErrorinCompMax);//! end loop do free(freeEnergy); free(freeOld); free(freeW); free(freeAB); free(freeS); free(freeDiff); free(freeWsurf); free(inCompMax); free(psum); free(fpsum); return 0; } extern void getConc(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int gpu_index; dim3 grid(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); int threads=1024; size_t smemSize = threads * sizeof(double)*2;//(threads <= 32) ? 
2 * threads * sizeof(double) : average_value(cufft_info->wa_cu,gpu_info,cufft_info); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); qInt_init<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->qInt_cu[gpu_index]); checkCudaErrors(cudaDeviceSynchronize()); } sovDifFft(gpu_info,cufft_info,cufft_info->qa_cu,cufft_info->wa_cu,cufft_info->NsA,1); sovDifFft(gpu_info,cufft_info,cufft_info->qcb_cu,cufft_info->wb_cu,cufft_info->NsB,-1); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); qa_to_qInt<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->qInt_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->NsA); } sovDifFft(gpu_info,cufft_info,cufft_info->qb_cu,cufft_info->wb_cu,cufft_info->NsB,1); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); qa_to_qInt2<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->qInt_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsB); checkCudaErrors(cudaDeviceSynchronize()); } sovDifFft(gpu_info,cufft_info,cufft_info->qca_cu,cufft_info->wa_cu,cufft_info->NsA,-1); //for(int i=0;i<20;i++) printf("%g\n",cufft_info->qca_cu[0][i*cufft_info->NsA]); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); cal_ql<<<cufft_info->batch,threads,smemSize,gpu_info->stream[gpu_index]>>>(cufft_info->ql[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->NsB,cufft_info->NxNyNz); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); for(int i=0;i<cufft_info->batch;i++){ cufft_info->ql[gpu_index][i]/=cufft_info->NxNyNz; cufft_info->ffl[gpu_index][i]=cufft_info->ds0/cufft_info->ql[gpu_index][i]; } //w_to_phi<<<grid,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(cufft_info->pha_cu[gpu_index], cufft_info->phb_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->qca_cu[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsA,cufft_info->NsB,cufft_info->ffl[gpu_index]); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); w_to_phi_go<<<gridgo,gpu_info->thread,0,gpu_info->stream[gpu_index]>>>(cufft_info->pha_cu[gpu_index], cufft_info->phb_cu[gpu_index],cufft_info->qa_cu[gpu_index],cufft_info->qca_cu[gpu_index],cufft_info->qb_cu[gpu_index],cufft_info->qcb_cu[gpu_index],cufft_info->NsA,cufft_info->NsB,cufft_info->ffl[gpu_index]); checkCudaErrors(cudaDeviceSynchronize()); //printf("cal=%g\n",cufft_info->ql[gpu_index][0]); } checkCudaErrors(cudaGetLastError()); } extern void sovDifFft(GPU_INFO *gpu_info,CUFFT_INFO *cufft_info,std::vector<double*> g,std::vector<double*> w,int ns,int sign){ int ns1=ns+1; int Nx=cufft_info->Nx; int Ny=cufft_info->Ny; int Nz=cufft_info->Nz; int gpu_index; int iz; dim3 grid(Nx,Ny,Nz),block(cufft_info->batch,1,1),grid1(cufft_info->Nxh1,Ny,Nz); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); initilize_wdz<<<grid,block,0,gpu_info->stream[gpu_index]>>>(w[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->ds2); } if(sign==1){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); initilize_q<<<grid,block,1,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->qInt_cu[gpu_index],ns1);//,gpu_info->stream[gpu_index] } 
for(iz=1;iz<=ns;iz++){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); initilize_in_go<<<gridgo,gpu_info->thread,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz); //initilize_in<<<grid,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz); checkCudaErrors(cufftExecD2Z(cufft_info->plan_forward[gpu_index],cufft_info->device_in[gpu_index],cufft_info->device_out[gpu_index])); //sufaceField<<<grid1,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nx); dim3 gridgo_sur(cufft_info->Nxh1NyNz/gpu_info->thread_sur,cufft_info->batch); sufaceField_go<<<gridgo_sur,gpu_info->thread_sur,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nxh1,cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); checkCudaErrors(cufftExecZ2D(cufft_info->plan_backward[gpu_index],cufft_info->device_out[gpu_index],cufft_info->device_in[gpu_index])); in_to_g_go<<<gridgo,gpu_info->thread,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); //in_to_g<<<grid,block,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); } } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(cudaStreamSynchronize(gpu_info->stream[gpu_index])); checkCudaErrors(cudaDeviceSynchronize()); } } else if(sign==-1){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); initilize_q_inverse<<<grid,block,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->qInt_cu[gpu_index],ns1);//,gpu_info->stream[gpu_index] } for(iz=ns-1;iz>=0;iz--){ for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); dim3 gridgo(cufft_info->NxNyNz/gpu_info->thread,cufft_info->batch); //initilize_in<<<grid,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz+2); initilize_in_go<<<gridgo,gpu_info->thread,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_in[gpu_index],g[gpu_index],cufft_info->wdz_cu[gpu_index],ns1,iz+2); checkCudaErrors(cufftExecD2Z(cufft_info->plan_forward[gpu_index],cufft_info->device_in[gpu_index],cufft_info->device_out[gpu_index])); //sufaceField<<<grid1,block,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nx); dim3 gridgo_sur(cufft_info->Nxh1NyNz/gpu_info->thread_sur,cufft_info->batch); sufaceField_go<<<gridgo_sur,gpu_info->thread_sur,0,gpu_info->stream[gpu_index]>>>(cufft_info->device_out[gpu_index],cufft_info->kxyzdz_cu[gpu_index],cufft_info->Nxh1,cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); checkCudaErrors(cufftExecZ2D(cufft_info->plan_backward[gpu_index],cufft_info->device_out[gpu_index],cufft_info->device_in[gpu_index])); //in_to_g<<<grid,block,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); 
in_to_g_go<<<gridgo,gpu_info->thread,0,gpu_info->stream[gpu_index]>>>(g[gpu_index],cufft_info->wdz_cu[gpu_index],cufft_info->device_in[gpu_index],ns1, iz); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaGetLastError()); } } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(cudaStreamSynchronize(gpu_info->stream[gpu_index])); checkCudaErrors(cudaDeviceSynchronize()); } } } extern void average_value(std::vector<double*> data,GPU_INFO *gpu_info,CUFFT_INFO *cufft_info){ int gpu_index; int threads=gpu_info->thread; size_t smemSize = (threads <= 32) ? 2 * threads * sizeof(double) : threads * sizeof(double); std::vector<double*> sum; sum.resize(gpu_info->GPU_N); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(cudaMallocManaged((void**)&(sum[gpu_index]), sizeof(double)* cufft_info->batch)); reduce3<double><<< cufft_info->batch, threads, smemSize,gpu_info->stream[gpu_index] >>>(data[gpu_index], sum[gpu_index], cufft_info->NxNyNz); } dim3 block(cufft_info->Nx,cufft_info->Ny,cufft_info->Nz); for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); minus_average<<<block,cufft_info->batch,0,gpu_info->stream[gpu_index]>>>(data[gpu_index],sum[gpu_index]); } for(gpu_index=0;gpu_index<gpu_info->GPU_N;gpu_index++){ checkCudaErrors(cudaSetDevice(gpu_info->whichGPUs[gpu_index])); checkCudaErrors(cudaFree(sum[gpu_index])); checkCudaErrors(cudaDeviceSynchronize()); } }
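The chain propagation in sovDifFft (in both the HIP and CUDA variants above) follows a split-step pseudo-spectral pattern: multiply by exp(-w*ds/2) in real space, forward FFT, multiply by a precomputed exp(-k^2*ds) table in Fourier space, inverse FFT, multiply by exp(-w*ds/2) again, and absorb the 1/NxNyNz factor that cuFFT's unnormalized transforms leave behind. The sketch below shows one such step for a single field on a single stream; the names (q, wdz, kexp, diffusion_step) and the assumption that the normalization is applied in the final real-space multiply are simplifications, not the original data layout.

#include <cufft.h>
#include <cuda_runtime.h>

__global__ void mul_real(double* q, const double* wdz, double scale, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) q[i] *= wdz[i] * scale;  // exp(-w*ds/2), optionally with 1/N folded in
}

__global__ void mul_fourier(cufftDoubleComplex* qk, const double* kexp, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) { qk[i].x *= kexp[i]; qk[i].y *= kexp[i]; }  // exp(-k^2*ds)
}

// One diffusion step: q <- exp(-w ds/2) * IFFT( exp(-k^2 ds) * FFT( exp(-w ds/2) * q ) ) / N
void diffusion_step(cufftHandle fwd, cufftHandle bwd, double* q, cufftDoubleComplex* qk,
                    const double* wdz, const double* kexp, int nreal, int ncomplex) {
  const int t = 256;
  mul_real<<<(nreal + t - 1) / t, t>>>(q, wdz, 1.0, nreal);
  cufftExecD2Z(fwd, q, qk);
  mul_fourier<<<(ncomplex + t - 1) / t, t>>>(qk, kexp, ncomplex);
  cufftExecZ2D(bwd, qk, q);
  mul_real<<<(nreal + t - 1) / t, t>>>(q, wdz, 1.0 / nreal, nreal);  // undo cuFFT scaling
}

// In the original code the same sequence appears per batch and per GPU: initilize_in_go plays
// the role of the first real-space multiply, sufaceField_go applies the Fourier-space factor
// (kxyzdz), and in_to_g_go applies the second real-space factor and stores the contour slice.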
6794f74b600f11703fd6a91137d40dc33eab4e44.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020-2022 by XGBoost Contributors */ #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/transform_scan.h> #include <thrust/unique.h> #include <limits> // std::numeric_limits #include <memory> #include <utility> #include "categorical.h" #include "common.h" #include "device_helpers_hip.cuh" #include "hist_util.h" #include "quantile.cuh" #include "quantile.h" #include "xgboost/span.h" namespace xgboost { namespace common { using WQSketch = HostSketchContainer::WQSketch; using SketchEntry = WQSketch::Entry; // Algorithm 4 in XGBoost's paper, using binary search to find i. template <typename EntryIter> __device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) { assert(end - beg >= 2); rank *= 2; auto front = *beg; if (rank < front.rmin + front.rmax) { return *beg; } auto back = *(end - 1); if (rank >= back.rmin + back.rmax) { return back; } auto search_begin = dh::MakeTransformIterator<float>( beg, [=] __device__(SketchEntry const &entry) { return entry.rmin + entry.rmax; }); auto search_end = search_begin + (end - beg); auto i = thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) - search_begin - 1; if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) { return *(beg + i); } else { return *(beg + i + 1); } } template <typename InEntry, typename ToSketchEntry> void PruneImpl(common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<InEntry const> sorted_data, Span<size_t const> columns_ptr_in, // could be ptr for data or cuts Span<FeatureType const> feature_types, Span<SketchEntry> out_cuts, ToSketchEntry to_sketch_entry) { dh::LaunchN(out_cuts.size(), [=] __device__(size_t idx) { size_t column_id = dh::SegmentId(cuts_ptr, idx); auto out_column = out_cuts.subspan( cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]); auto in_column = sorted_data.subspan(columns_ptr_in[column_id], columns_ptr_in[column_id + 1] - columns_ptr_in[column_id]); auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id]; idx -= cuts_ptr[column_id]; auto front = to_sketch_entry(0ul, in_column, column_id); auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id); auto is_cat = IsCat(feature_types, column_id); if (in_column.size() <= to || is_cat) { // cut idx equals sample idx out_column[idx] = to_sketch_entry(idx, in_column, column_id); return; } // 1 thread for each output. See A.4 for detail. auto d_out = out_column; if (idx == 0) { d_out.front() = front; return; } if (idx == to - 1) { d_out.back() = back; return; } float w = back.rmin - front.rmax; auto budget = static_cast<float>(d_out.size()); assert(budget != 0); auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax); auto it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { auto e = to_sketch_entry(idx, in_column, column_id); return e; }); d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q); }); } template <typename T, typename U> void CopyTo(Span<T> out, Span<U> src) { CHECK_EQ(out.size(), src.size()); static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<T>>::value, ""); dh::safe_cuda(hipMemcpyAsync(out.data(), src.data(), out.size_bytes(), hipMemcpyDefault)); } // Compute the merge path. 
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath( Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }), d_x.data())); auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }), d_y.data())); using Tuple = thrust::tuple<uint64_t, uint64_t>; thrust::constant_iterator<uint64_t> a_ind_iter(0ul); thrust::constant_iterator<uint64_t> b_ind_iter(1ul); auto place_holder = thrust::make_constant_iterator<uint64_t>(0u); auto x_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder)); auto y_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder)); dh::XGBCachingDeviceAllocator<Tuple> alloc; static_assert(sizeof(Tuple) == sizeof(SketchEntry), ""); // We reuse the memory for storing merge path. common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()}; // Determine the merge path, 0 if element is from x, 1 if it's from y. thrust::merge_by_key( thrust::hip::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(), y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it, y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(), [=] __device__(auto const &l, auto const &r) -> bool { auto l_column_id = thrust::get<0>(l); auto r_column_id = thrust::get<0>(r); if (l_column_id == r_column_id) { return thrust::get<1>(l).value < thrust::get<1>(r).value; } return l_column_id < r_column_id; }); // Compute output ptr auto transform_it = thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data())); thrust::transform( thrust::hip::par(alloc), transform_it, transform_it + x_ptr.size(), out_ptr.data(), [] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); }); // 0^th is the indicator, 1^th is placeholder auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); }; // 0^th is the counter for x, 1^th for y. auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); }; auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); }; auto scan_key_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); }); auto scan_val_it = dh::MakeTransformIterator<Tuple>( merge_path.data(), [=] __device__(Tuple const &t) -> Tuple { auto ind = get_ind(t); // == 0 if element is from x // x_counter, y_counter return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind); }); // Compute the index for both x and y (which of the element in a and b are used in each // comparison) by scanning the binary merge path. Take output [(x_0, y_0), (x_0, y_1), // ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path. // Assuming y_0 is less than x_0 so this step is toward the end of y. After the // comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0 // is landed into output as the first element in merge result. The scan result is the // subscript of x and y. 
thrust::exclusive_scan_by_key( thrust::hip::par(alloc), scan_key_it, scan_key_it + merge_path.size(), scan_val_it, merge_path.data(), thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul), thrust::equal_to<size_t>{}, [=] __device__(Tuple const &l, Tuple const &r) -> Tuple { return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r)); }); return merge_path; } // Merge d_x and d_y into out. Because the final output depends on predicate (which // summary does the output element come from) result by definition of merged rank. So we // run it in 2 passes to obtain the merge path and then customize the standard merge // algorithm. void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { dh::safe_cuda(hipSetDevice(device)); CHECK_EQ(d_x.size() + d_y.size(), out.size()); CHECK_EQ(x_ptr.size(), out_ptr.size()); CHECK_EQ(y_ptr.size(), out_ptr.size()); auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr); auto d_out = out; dh::LaunchN(d_out.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(out_ptr, idx); idx -= out_ptr[column_id]; auto d_x_column = d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]); auto d_y_column = d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]); auto d_out_column = d_out.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); auto d_path_column = d_merge_path.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); uint64_t a_ind, b_ind; thrust::tie(a_ind, b_ind) = d_path_column[idx]; // Handle empty column. If both columns are empty, we should not get this column_id // as result of binary search. assert((d_x_column.size() != 0) || (d_y_column.size() != 0)); if (d_x_column.size() == 0) { d_out_column[idx] = d_y_column[b_ind]; return; } if (d_y_column.size() == 0) { d_out_column[idx] = d_x_column[a_ind]; return; } // Handle trailing elements. assert(a_ind <= d_x_column.size()); if (a_ind == d_x_column.size()) { // Trailing elements are from y because there's no more x to land. auto y_elem = d_y_column[b_ind]; d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(), y_elem.rmax + d_x_column.back().rmax, y_elem.wmin, y_elem.value); return; } auto x_elem = d_x_column[a_ind]; assert(b_ind <= d_y_column.size()); if (b_ind == d_y_column.size()) { d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(), x_elem.rmax + d_y_column.back().rmax, x_elem.wmin, x_elem.value); return; } auto y_elem = d_y_column[b_ind]; /* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret it is rewriting the symbols on both side of equality. Take eq (26) as an example: Expand it according to definition of extended rank then rewrite it into: If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}: r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) + [r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)] Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be used in $D_1$ as it's since $k_i \in D_1$. Other 2 equations can be applied similarly with $k_i$ comes from different $D$. just use different symbol on different source of summary. 
*/ assert(idx < d_out_column.size()); if (x_elem.value == y_elem.value) { d_out_column[idx] = SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax, x_elem.wmin + y_elem.wmin, x_elem.value}; } else if (x_elem.value < y_elem.value) { // elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than // x_elem if we put x_elem in D_2. float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext(); // rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for // implementation, the weight is stored in a separated field and we compute the // extended definition on the fly when needed. d_out_column[idx] = SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(), x_elem.wmin, x_elem.value}; } else { // elem from y is landed. float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext(); d_out_column[idx] = SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax, y_elem.wmin, y_elem.value}; } }); } void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr, common::Span<OffsetT> cuts_ptr, size_t total_cuts, Span<float> weights) { dh::safe_cuda(hipSetDevice(device_)); Span<SketchEntry> out; dh::device_vector<SketchEntry> cuts; bool first_window = this->Current().empty(); if (!first_window) { cuts.resize(total_cuts); out = dh::ToSpan(cuts); } else { this->Current().resize(total_cuts); out = dh::ToSpan(this->Current()); } auto ft = this->feature_types_.ConstDeviceSpan(); if (weights.empty()) { auto to_sketch_entry = [] __device__(size_t sample_idx, Span<Entry const> const &column, size_t) { float rmin = sample_idx; float rmax = sample_idx + 1; return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } else { auto to_sketch_entry = [weights, columns_ptr] __device__( size_t sample_idx, Span<Entry const> const &column, size_t column_id) { Span<float const> column_weights_scan = weights.subspan(columns_ptr[column_id], column.size()); float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f; float rmax = column_weights_scan[sample_idx]; float wmin = rmax - rmin; wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error. return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } auto n_uniques = this->ScanInput(out, cuts_ptr); if (!first_window) { CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size()); out = out.subspan(0, n_uniques); this->Merge(cuts_ptr, out); this->FixError(); } else { this->Current().resize(n_uniques); this->columns_ptr_.SetDevice(device_); this->columns_ptr_.Resize(cuts_ptr.size()); auto d_cuts_ptr = this->columns_ptr_.DeviceSpan(); CopyTo(d_cuts_ptr, cuts_ptr); } } size_t SketchContainer::ScanInput(Span<SketchEntry> entries, Span<OffsetT> d_columns_ptr_in) { /* There are 2 types of duplication. First is duplicated feature values, which comes * from user input data. Second is duplicated sketching entries, which is generated by * pruning or merging. We preserve the first type and remove the second type. 
*/ timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); CHECK_EQ(d_columns_ptr_in.size(), num_columns_ + 1); dh::XGBCachingDeviceAllocator<char> alloc; auto key_it = dh::MakeTransformIterator<size_t>( thrust::make_reverse_iterator(thrust::make_counting_iterator(entries.size())), [=] __device__(size_t idx) { return dh::SegmentId(d_columns_ptr_in, idx); }); // Reverse scan to accumulate weights into first duplicated element on left. auto val_it = thrust::make_reverse_iterator(dh::tend(entries)); thrust::inclusive_scan_by_key( thrust::hip::par(alloc), key_it, key_it + entries.size(), val_it, val_it, thrust::equal_to<size_t>{}, [] __device__(SketchEntry const &r, SketchEntry const &l) { // Only accumulate for the first type of duplication. if (l.value - r.value == 0 && l.rmin - r.rmin != 0) { auto w = l.wmin + r.wmin; SketchEntry v{l.rmin, l.rmin + w, w, l.value}; return v; } return l; }); auto d_columns_ptr_out = columns_ptr_b_.DeviceSpan(); // thrust unique_by_key preserves the first element. auto n_uniques = dh::SegmentedUnique( d_columns_ptr_in.data(), d_columns_ptr_in.data() + d_columns_ptr_in.size(), entries.data(), entries.data() + entries.size(), d_columns_ptr_out.data(), entries.data(), detail::SketchUnique{}); CopyTo(d_columns_ptr_in, d_columns_ptr_out); timer_.Stop(__func__); return n_uniques; } void SketchContainer::Prune(size_t to) { timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); OffsetT to_total = 0; auto& h_columns_ptr = columns_ptr_b_.HostVector(); h_columns_ptr[0] = to_total; auto const& h_feature_types = feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t length = this->Column(i).size(); length = ::min(length, to); if (IsCat(h_feature_types, i)) { length = this->Column(i).size(); } to_total += length; h_columns_ptr[i+1] = to_total; } this->Other().resize(to_total); auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan(); auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan(); auto out = dh::ToSpan(this->Other()); auto in = dh::ToSpan(this->Current()); auto no_op = [] __device__(size_t sample_idx, Span<SketchEntry const> const &entries, size_t) { return entries[sample_idx]; }; // NOLINT auto ft = this->feature_types_.ConstDeviceSpan(); PruneImpl<SketchEntry>(d_columns_ptr_out, in, d_columns_ptr_in, ft, out, no_op); this->columns_ptr_.Copy(columns_ptr_b_); this->Alternate(); this->Unique(); timer_.Stop(__func__); } void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr, Span<SketchEntry const> that) { dh::safe_cuda(hipSetDevice(device_)); timer_.Start(__func__); if (this->Current().size() == 0) { CHECK_EQ(this->columns_ptr_.HostVector().back(), 0); CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size()); CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1); thrust::copy(thrust::device, d_that_columns_ptr.data(), d_that_columns_ptr.data() + d_that_columns_ptr.size(), this->columns_ptr_.DevicePointer()); auto total = this->columns_ptr_.HostVector().back(); this->Current().resize(total); CopyTo(dh::ToSpan(this->Current()), that); timer_.Stop(__func__); return; } this->Other().resize(this->Current().size() + that.size()); CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size()); MergeImpl(device_, this->Data(), this->ColumnsPtr(), that, d_that_columns_ptr, dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan()); this->columns_ptr_.Copy(columns_ptr_b_); CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1); this->Alternate(); if (this->HasCategorical()) { auto d_feature_types = 
this->FeatureTypes().ConstDeviceSpan(); this->Unique([d_feature_types] __device__(size_t l_fidx, size_t r_fidx) { return l_fidx == r_fidx && IsCat(d_feature_types, l_fidx); }); } timer_.Stop(__func__); } void SketchContainer::FixError() { dh::safe_cuda(hipSetDevice(device_)); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); auto in = dh::ToSpan(this->Current()); dh::LaunchN(in.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_columns_ptr, idx); auto in_column = in.subspan(d_columns_ptr[column_id], d_columns_ptr[column_id + 1] - d_columns_ptr[column_id]); idx -= d_columns_ptr[column_id]; float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin; if (in_column[idx].rmin < prev_rmin) { in_column[idx].rmin = prev_rmin; } float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax; if (in_column[idx].rmax < prev_rmax) { in_column[idx].rmax = prev_rmax; } float rmin_next = in_column[idx].RMinNext(); if (in_column[idx].rmax < rmin_next) { in_column[idx].rmax = rmin_next; } }); } void SketchContainer::AllReduce() { dh::safe_cuda(hipSetDevice(device_)); auto world = rabit::GetWorldSize(); if (world == 1) { return; } timer_.Start(__func__); if (!reducer_) { reducer_ = std::make_unique<dh::AllReducer>(); reducer_->Init(device_); } // Reduce the overhead on syncing. size_t global_sum_rows = num_rows_; rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1); size_t intermediate_num_cuts = ::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor)); this->Prune(intermediate_num_cuts); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1); size_t n = d_columns_ptr.size(); rabit::Allreduce<rabit::op::Max>(&n, 1); CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers"; // Get the columns ptr from all workers dh::device_vector<SketchContainer::OffsetT> gathered_ptrs; gathered_ptrs.resize(d_columns_ptr.size() * world, 0); size_t rank = rabit::GetRank(); auto offset = rank * d_columns_ptr.size(); thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(), gathered_ptrs.begin() + offset); reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(), gathered_ptrs.size()); // Get the data from all workers. std::vector<size_t> recv_lengths; dh::caching_device_vector<char> recvbuf; reducer_->AllGather(this->Current().data().get(), dh::ToSpan(this->Current()).size_bytes(), &recv_lengths, &recvbuf); reducer_->Synchronize(); // Segment the received data. auto s_recvbuf = dh::ToSpan(recvbuf); std::vector<Span<SketchEntry>> allworkers; offset = 0; for (int32_t i = 0; i < world; ++i) { size_t length_as_bytes = recv_lengths.at(i); auto raw = s_recvbuf.subspan(offset, length_as_bytes); auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()), length_as_bytes / sizeof(SketchEntry)); allworkers.emplace_back(sketch); offset += length_as_bytes; } // Merge them into a new sketch. 
SketchContainer new_sketch(this->feature_types_, num_bins_, this->num_columns_, global_sum_rows, this->device_); for (size_t i = 0; i < allworkers.size(); ++i) { auto worker = allworkers[i]; auto worker_ptr = dh::ToSpan(gathered_ptrs) .subspan(i * d_columns_ptr.size(), d_columns_ptr.size()); new_sketch.Merge(worker_ptr, worker); new_sketch.FixError(); } *this = std::move(new_sketch); timer_.Stop(__func__); } namespace { struct InvalidCatOp { Span<SketchEntry const> values; Span<size_t const> ptrs; Span<FeatureType const> ft; XGBOOST_DEVICE bool operator()(size_t i) const { auto fidx = dh::SegmentId(ptrs, i); return IsCat(ft, fidx) && InvalidCat(values[i].value); } }; } // anonymous namespace void SketchContainer::MakeCuts(HistogramCuts* p_cuts) { timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); p_cuts->min_vals_.Resize(num_columns_); // Sync between workers. this->AllReduce(); // Prune to final number of bins. this->Prune(num_bins_ + 1); this->FixError(); // Set up inputs auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); p_cuts->min_vals_.SetDevice(device_); auto d_min_values = p_cuts->min_vals_.DeviceSpan(); auto const in_cut_values = dh::ToSpan(this->Current()); // Set up output ptr p_cuts->cut_ptrs_.SetDevice(device_); auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector(); h_out_columns_ptr.clear(); h_out_columns_ptr.push_back(0); auto const& h_feature_types = this->feature_types_.ConstHostSpan(); auto d_ft = feature_types_.ConstDeviceSpan(); std::vector<SketchEntry> max_values; float max_cat{-1.f}; if (has_categorical_) { dh::XGBCachingDeviceAllocator<char> alloc; auto key_it = dh::MakeTransformIterator<bst_feature_t>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) -> bst_feature_t { return dh::SegmentId(d_in_columns_ptr, i); }); auto invalid_op = InvalidCatOp{in_cut_values, d_in_columns_ptr, d_ft}; auto val_it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) { auto fidx = dh::SegmentId(d_in_columns_ptr, i); auto v = in_cut_values[i]; if (IsCat(d_ft, fidx)) { if (invalid_op(i)) { // use inf to indicate invalid value, this way we can keep it as in // indicator in the reduce operation as it's always the greatest value. v.value = std::numeric_limits<float>::infinity(); } } return v; }); CHECK_EQ(num_columns_, d_in_columns_ptr.size() - 1); max_values.resize(d_in_columns_ptr.size() - 1); dh::caching_device_vector<SketchEntry> d_max_values(d_in_columns_ptr.size() - 1); thrust::reduce_by_key(thrust::hip::par(alloc), key_it, key_it + in_cut_values.size(), val_it, thrust::make_discard_iterator(), d_max_values.begin(), thrust::equal_to<bst_feature_t>{}, [] __device__(auto l, auto r) { return l.value > r.value ? l : r; }); dh::CopyDeviceSpanToVector(&max_values, dh::ToSpan(d_max_values)); auto max_it = common::MakeIndexTransformIter([&](auto i) { if (IsCat(h_feature_types, i)) { return max_values[i].value; } return -1.f; }); max_cat = *std::max_element(max_it, max_it + max_values.size()); if (std::isinf(max_cat)) { InvalidCategory(); } } // Set up output cuts for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t column_size = ::max(static_cast<size_t>(1ul), this->Column(i).size()); if (IsCat(h_feature_types, i)) { // column_size is the number of unique values in that feature. CheckMaxCat(max_values[i].value, column_size); h_out_columns_ptr.push_back(max_values[i].value + 1); // includes both max_cat and 0. 
} else { h_out_columns_ptr.push_back( ::min(static_cast<size_t>(column_size), static_cast<size_t>(num_bins_))); } } std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(), h_out_columns_ptr.begin()); auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan(); size_t total_bins = h_out_columns_ptr.back(); p_cuts->cut_values_.SetDevice(device_); p_cuts->cut_values_.Resize(total_bins); auto out_cut_values = p_cuts->cut_values_.DeviceSpan(); dh::LaunchN(total_bins, [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_out_columns_ptr, idx); auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id], d_in_columns_ptr[column_id + 1] - d_in_columns_ptr[column_id]); auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id], d_out_columns_ptr[column_id + 1] - d_out_columns_ptr[column_id]); idx -= d_out_columns_ptr[column_id]; if (in_column.size() == 0) { // If the column is empty, we push a dummy value. It won't affect training as the // column is empty, trees cannot split on it. This is just to be consistent with // rest of the library. if (idx == 0) { d_min_values[column_id] = kRtEps; out_column[0] = kRtEps; assert(out_column.size() == 1); } return; } if (idx == 0 && !IsCat(d_ft, column_id)) { auto mval = in_column[idx].value; d_min_values[column_id] = mval - (fabs(mval) + 1e-5); } if (IsCat(d_ft, column_id)) { out_column[idx] = idx; return; } // Last thread is responsible for setting a value that's greater than other cuts. if (idx == out_column.size() - 1) { const bst_float cpt = in_column.back().value; // this must be bigger than last value in a scale const bst_float last = cpt + (fabs(cpt) + 1e-5); out_column[idx] = last; return; } assert(idx+1 < in_column.size()); out_column[idx] = in_column[idx+1].value; }); p_cuts->SetCategorical(this->has_categorical_, max_cat); timer_.Stop(__func__); } } // namespace common } // namespace xgboost
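The two-pass MergePath scheme in the file above (thrust::merge_by_key to record which summary each merged slot comes from, followed by a segmented exclusive scan to recover per-summary subscripts) can be exercised in isolation. The sketch below is not part of either file in this record: it reduces the idea to a single, unsegmented column, uses plain Thrust calls and placeholder expressions instead of extended device lambdas, and its names (a_ind, b_ind, SoftThreshold-style helpers) are illustrative only. The real code additionally segments by column id and compares SketchEntry values.

#include <thrust/device_vector.h>
#include <thrust/merge.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  std::vector<float> hx{1.f, 4.f, 9.f};
  std::vector<float> hy{2.f, 3.f, 8.f, 10.f};
  thrust::device_vector<float> x(hx.begin(), hx.end());
  thrust::device_vector<float> y(hy.begin(), hy.end());
  thrust::device_vector<int> ind(x.size() + y.size());

  // Pass 1: merge the two sorted inputs and keep only an indicator per output
  // slot (0 = element taken from x, 1 = element taken from y).
  thrust::merge_by_key(x.begin(), x.end(), y.begin(), y.end(),
                       thrust::make_constant_iterator(0),
                       thrust::make_constant_iterator(1),
                       thrust::make_discard_iterator(), ind.begin());

  // Pass 2: an exclusive scan of the indicator gives, for every output slot,
  // how many y elements were consumed before it (b_ind); the x count follows
  // as slot_index - b_ind (a_ind), mirroring the (!ind, ind) scan above.
  thrust::device_vector<int> b_ind(ind.size()), a_ind(ind.size());
  thrust::exclusive_scan(ind.begin(), ind.end(), b_ind.begin());
  using namespace thrust::placeholders;
  thrust::transform(thrust::make_counting_iterator(0),
                    thrust::make_counting_iterator(static_cast<int>(ind.size())),
                    b_ind.begin(), a_ind.begin(), _1 - _2);

  for (std::size_t i = 0; i < ind.size(); ++i) {
    std::printf("slot %zu: from %c, a_ind=%d, b_ind=%d\n", i,
                int(ind[i]) ? 'y' : 'x', int(a_ind[i]), int(b_ind[i]));
  }
  return 0;
}

For the inputs above the merged order is 1,2,3,4,8,9,10, and each slot's a_ind/b_ind pair points at exactly the x or y element that landed there, which is the same information MergeImpl reads back from d_merge_path.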
6794f74b600f11703fd6a91137d40dc33eab4e44.cu
/*! * Copyright 2020-2022 by XGBoost Contributors */ #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/transform_scan.h> #include <thrust/unique.h> #include <limits> // std::numeric_limits #include <memory> #include <utility> #include "categorical.h" #include "common.h" #include "device_helpers.cuh" #include "hist_util.h" #include "quantile.cuh" #include "quantile.h" #include "xgboost/span.h" namespace xgboost { namespace common { using WQSketch = HostSketchContainer::WQSketch; using SketchEntry = WQSketch::Entry; // Algorithm 4 in XGBoost's paper, using binary search to find i. template <typename EntryIter> __device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) { assert(end - beg >= 2); rank *= 2; auto front = *beg; if (rank < front.rmin + front.rmax) { return *beg; } auto back = *(end - 1); if (rank >= back.rmin + back.rmax) { return back; } auto search_begin = dh::MakeTransformIterator<float>( beg, [=] __device__(SketchEntry const &entry) { return entry.rmin + entry.rmax; }); auto search_end = search_begin + (end - beg); auto i = thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) - search_begin - 1; if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) { return *(beg + i); } else { return *(beg + i + 1); } } template <typename InEntry, typename ToSketchEntry> void PruneImpl(common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<InEntry const> sorted_data, Span<size_t const> columns_ptr_in, // could be ptr for data or cuts Span<FeatureType const> feature_types, Span<SketchEntry> out_cuts, ToSketchEntry to_sketch_entry) { dh::LaunchN(out_cuts.size(), [=] __device__(size_t idx) { size_t column_id = dh::SegmentId(cuts_ptr, idx); auto out_column = out_cuts.subspan( cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]); auto in_column = sorted_data.subspan(columns_ptr_in[column_id], columns_ptr_in[column_id + 1] - columns_ptr_in[column_id]); auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id]; idx -= cuts_ptr[column_id]; auto front = to_sketch_entry(0ul, in_column, column_id); auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id); auto is_cat = IsCat(feature_types, column_id); if (in_column.size() <= to || is_cat) { // cut idx equals sample idx out_column[idx] = to_sketch_entry(idx, in_column, column_id); return; } // 1 thread for each output. See A.4 for detail. auto d_out = out_column; if (idx == 0) { d_out.front() = front; return; } if (idx == to - 1) { d_out.back() = back; return; } float w = back.rmin - front.rmax; auto budget = static_cast<float>(d_out.size()); assert(budget != 0); auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax); auto it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { auto e = to_sketch_entry(idx, in_column, column_id); return e; }); d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q); }); } template <typename T, typename U> void CopyTo(Span<T> out, Span<U> src) { CHECK_EQ(out.size(), src.size()); static_assert(std::is_same<std::remove_cv_t<T>, std::remove_cv_t<T>>::value, ""); dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(), out.size_bytes(), cudaMemcpyDefault)); } // Compute the merge path. 
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath( Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }), d_x.data())); auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }), d_y.data())); using Tuple = thrust::tuple<uint64_t, uint64_t>; thrust::constant_iterator<uint64_t> a_ind_iter(0ul); thrust::constant_iterator<uint64_t> b_ind_iter(1ul); auto place_holder = thrust::make_constant_iterator<uint64_t>(0u); auto x_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder)); auto y_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder)); dh::XGBCachingDeviceAllocator<Tuple> alloc; static_assert(sizeof(Tuple) == sizeof(SketchEntry), ""); // We reuse the memory for storing merge path. common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()}; // Determine the merge path, 0 if element is from x, 1 if it's from y. thrust::merge_by_key( thrust::cuda::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(), y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it, y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(), [=] __device__(auto const &l, auto const &r) -> bool { auto l_column_id = thrust::get<0>(l); auto r_column_id = thrust::get<0>(r); if (l_column_id == r_column_id) { return thrust::get<1>(l).value < thrust::get<1>(r).value; } return l_column_id < r_column_id; }); // Compute output ptr auto transform_it = thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data())); thrust::transform( thrust::cuda::par(alloc), transform_it, transform_it + x_ptr.size(), out_ptr.data(), [] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); }); // 0^th is the indicator, 1^th is placeholder auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); }; // 0^th is the counter for x, 1^th for y. auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); }; auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); }; auto scan_key_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); }); auto scan_val_it = dh::MakeTransformIterator<Tuple>( merge_path.data(), [=] __device__(Tuple const &t) -> Tuple { auto ind = get_ind(t); // == 0 if element is from x // x_counter, y_counter return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind); }); // Compute the index for both x and y (which of the element in a and b are used in each // comparison) by scanning the binary merge path. Take output [(x_0, y_0), (x_0, y_1), // ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path. // Assuming y_0 is less than x_0 so this step is toward the end of y. After the // comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0 // is landed into output as the first element in merge result. The scan result is the // subscript of x and y. 
thrust::exclusive_scan_by_key( thrust::cuda::par(alloc), scan_key_it, scan_key_it + merge_path.size(), scan_val_it, merge_path.data(), thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul), thrust::equal_to<size_t>{}, [=] __device__(Tuple const &l, Tuple const &r) -> Tuple { return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r)); }); return merge_path; } // Merge d_x and d_y into out. Because the final output depends on predicate (which // summary does the output element come from) result by definition of merged rank. So we // run it in 2 passes to obtain the merge path and then customize the standard merge // algorithm. void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { dh::safe_cuda(cudaSetDevice(device)); CHECK_EQ(d_x.size() + d_y.size(), out.size()); CHECK_EQ(x_ptr.size(), out_ptr.size()); CHECK_EQ(y_ptr.size(), out_ptr.size()); auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr); auto d_out = out; dh::LaunchN(d_out.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(out_ptr, idx); idx -= out_ptr[column_id]; auto d_x_column = d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]); auto d_y_column = d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]); auto d_out_column = d_out.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); auto d_path_column = d_merge_path.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); uint64_t a_ind, b_ind; thrust::tie(a_ind, b_ind) = d_path_column[idx]; // Handle empty column. If both columns are empty, we should not get this column_id // as result of binary search. assert((d_x_column.size() != 0) || (d_y_column.size() != 0)); if (d_x_column.size() == 0) { d_out_column[idx] = d_y_column[b_ind]; return; } if (d_y_column.size() == 0) { d_out_column[idx] = d_x_column[a_ind]; return; } // Handle trailing elements. assert(a_ind <= d_x_column.size()); if (a_ind == d_x_column.size()) { // Trailing elements are from y because there's no more x to land. auto y_elem = d_y_column[b_ind]; d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(), y_elem.rmax + d_x_column.back().rmax, y_elem.wmin, y_elem.value); return; } auto x_elem = d_x_column[a_ind]; assert(b_ind <= d_y_column.size()); if (b_ind == d_y_column.size()) { d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(), x_elem.rmax + d_y_column.back().rmax, x_elem.wmin, x_elem.value); return; } auto y_elem = d_y_column[b_ind]; /* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret it is rewriting the symbols on both side of equality. Take eq (26) as an example: Expand it according to definition of extended rank then rewrite it into: If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}: r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) + [r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)] Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be used in $D_1$ as it's since $k_i \in D_1$. Other 2 equations can be applied similarly with $k_i$ comes from different $D$. just use different symbol on different source of summary. 
*/ assert(idx < d_out_column.size()); if (x_elem.value == y_elem.value) { d_out_column[idx] = SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax, x_elem.wmin + y_elem.wmin, x_elem.value}; } else if (x_elem.value < y_elem.value) { // elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than // x_elem if we put x_elem in D_2. float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext(); // rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for // implementation, the weight is stored in a separated field and we compute the // extended definition on the fly when needed. d_out_column[idx] = SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(), x_elem.wmin, x_elem.value}; } else { // elem from y is landed. float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext(); d_out_column[idx] = SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax, y_elem.wmin, y_elem.value}; } }); } void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr, common::Span<OffsetT> cuts_ptr, size_t total_cuts, Span<float> weights) { dh::safe_cuda(cudaSetDevice(device_)); Span<SketchEntry> out; dh::device_vector<SketchEntry> cuts; bool first_window = this->Current().empty(); if (!first_window) { cuts.resize(total_cuts); out = dh::ToSpan(cuts); } else { this->Current().resize(total_cuts); out = dh::ToSpan(this->Current()); } auto ft = this->feature_types_.ConstDeviceSpan(); if (weights.empty()) { auto to_sketch_entry = [] __device__(size_t sample_idx, Span<Entry const> const &column, size_t) { float rmin = sample_idx; float rmax = sample_idx + 1; return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } else { auto to_sketch_entry = [weights, columns_ptr] __device__( size_t sample_idx, Span<Entry const> const &column, size_t column_id) { Span<float const> column_weights_scan = weights.subspan(columns_ptr[column_id], column.size()); float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f; float rmax = column_weights_scan[sample_idx]; float wmin = rmax - rmin; wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error. return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } auto n_uniques = this->ScanInput(out, cuts_ptr); if (!first_window) { CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size()); out = out.subspan(0, n_uniques); this->Merge(cuts_ptr, out); this->FixError(); } else { this->Current().resize(n_uniques); this->columns_ptr_.SetDevice(device_); this->columns_ptr_.Resize(cuts_ptr.size()); auto d_cuts_ptr = this->columns_ptr_.DeviceSpan(); CopyTo(d_cuts_ptr, cuts_ptr); } } size_t SketchContainer::ScanInput(Span<SketchEntry> entries, Span<OffsetT> d_columns_ptr_in) { /* There are 2 types of duplication. First is duplicated feature values, which comes * from user input data. Second is duplicated sketching entries, which is generated by * pruning or merging. We preserve the first type and remove the second type. 
*/ timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); CHECK_EQ(d_columns_ptr_in.size(), num_columns_ + 1); dh::XGBCachingDeviceAllocator<char> alloc; auto key_it = dh::MakeTransformIterator<size_t>( thrust::make_reverse_iterator(thrust::make_counting_iterator(entries.size())), [=] __device__(size_t idx) { return dh::SegmentId(d_columns_ptr_in, idx); }); // Reverse scan to accumulate weights into first duplicated element on left. auto val_it = thrust::make_reverse_iterator(dh::tend(entries)); thrust::inclusive_scan_by_key( thrust::cuda::par(alloc), key_it, key_it + entries.size(), val_it, val_it, thrust::equal_to<size_t>{}, [] __device__(SketchEntry const &r, SketchEntry const &l) { // Only accumulate for the first type of duplication. if (l.value - r.value == 0 && l.rmin - r.rmin != 0) { auto w = l.wmin + r.wmin; SketchEntry v{l.rmin, l.rmin + w, w, l.value}; return v; } return l; }); auto d_columns_ptr_out = columns_ptr_b_.DeviceSpan(); // thrust unique_by_key preserves the first element. auto n_uniques = dh::SegmentedUnique( d_columns_ptr_in.data(), d_columns_ptr_in.data() + d_columns_ptr_in.size(), entries.data(), entries.data() + entries.size(), d_columns_ptr_out.data(), entries.data(), detail::SketchUnique{}); CopyTo(d_columns_ptr_in, d_columns_ptr_out); timer_.Stop(__func__); return n_uniques; } void SketchContainer::Prune(size_t to) { timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); OffsetT to_total = 0; auto& h_columns_ptr = columns_ptr_b_.HostVector(); h_columns_ptr[0] = to_total; auto const& h_feature_types = feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t length = this->Column(i).size(); length = std::min(length, to); if (IsCat(h_feature_types, i)) { length = this->Column(i).size(); } to_total += length; h_columns_ptr[i+1] = to_total; } this->Other().resize(to_total); auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan(); auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan(); auto out = dh::ToSpan(this->Other()); auto in = dh::ToSpan(this->Current()); auto no_op = [] __device__(size_t sample_idx, Span<SketchEntry const> const &entries, size_t) { return entries[sample_idx]; }; // NOLINT auto ft = this->feature_types_.ConstDeviceSpan(); PruneImpl<SketchEntry>(d_columns_ptr_out, in, d_columns_ptr_in, ft, out, no_op); this->columns_ptr_.Copy(columns_ptr_b_); this->Alternate(); this->Unique(); timer_.Stop(__func__); } void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr, Span<SketchEntry const> that) { dh::safe_cuda(cudaSetDevice(device_)); timer_.Start(__func__); if (this->Current().size() == 0) { CHECK_EQ(this->columns_ptr_.HostVector().back(), 0); CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size()); CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1); thrust::copy(thrust::device, d_that_columns_ptr.data(), d_that_columns_ptr.data() + d_that_columns_ptr.size(), this->columns_ptr_.DevicePointer()); auto total = this->columns_ptr_.HostVector().back(); this->Current().resize(total); CopyTo(dh::ToSpan(this->Current()), that); timer_.Stop(__func__); return; } this->Other().resize(this->Current().size() + that.size()); CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size()); MergeImpl(device_, this->Data(), this->ColumnsPtr(), that, d_that_columns_ptr, dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan()); this->columns_ptr_.Copy(columns_ptr_b_); CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1); this->Alternate(); if (this->HasCategorical()) { auto 
d_feature_types = this->FeatureTypes().ConstDeviceSpan(); this->Unique([d_feature_types] __device__(size_t l_fidx, size_t r_fidx) { return l_fidx == r_fidx && IsCat(d_feature_types, l_fidx); }); } timer_.Stop(__func__); } void SketchContainer::FixError() { dh::safe_cuda(cudaSetDevice(device_)); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); auto in = dh::ToSpan(this->Current()); dh::LaunchN(in.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_columns_ptr, idx); auto in_column = in.subspan(d_columns_ptr[column_id], d_columns_ptr[column_id + 1] - d_columns_ptr[column_id]); idx -= d_columns_ptr[column_id]; float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin; if (in_column[idx].rmin < prev_rmin) { in_column[idx].rmin = prev_rmin; } float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax; if (in_column[idx].rmax < prev_rmax) { in_column[idx].rmax = prev_rmax; } float rmin_next = in_column[idx].RMinNext(); if (in_column[idx].rmax < rmin_next) { in_column[idx].rmax = rmin_next; } }); } void SketchContainer::AllReduce() { dh::safe_cuda(cudaSetDevice(device_)); auto world = rabit::GetWorldSize(); if (world == 1) { return; } timer_.Start(__func__); if (!reducer_) { reducer_ = std::make_unique<dh::AllReducer>(); reducer_->Init(device_); } // Reduce the overhead on syncing. size_t global_sum_rows = num_rows_; rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1); size_t intermediate_num_cuts = std::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor)); this->Prune(intermediate_num_cuts); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1); size_t n = d_columns_ptr.size(); rabit::Allreduce<rabit::op::Max>(&n, 1); CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers"; // Get the columns ptr from all workers dh::device_vector<SketchContainer::OffsetT> gathered_ptrs; gathered_ptrs.resize(d_columns_ptr.size() * world, 0); size_t rank = rabit::GetRank(); auto offset = rank * d_columns_ptr.size(); thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(), gathered_ptrs.begin() + offset); reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(), gathered_ptrs.size()); // Get the data from all workers. std::vector<size_t> recv_lengths; dh::caching_device_vector<char> recvbuf; reducer_->AllGather(this->Current().data().get(), dh::ToSpan(this->Current()).size_bytes(), &recv_lengths, &recvbuf); reducer_->Synchronize(); // Segment the received data. auto s_recvbuf = dh::ToSpan(recvbuf); std::vector<Span<SketchEntry>> allworkers; offset = 0; for (int32_t i = 0; i < world; ++i) { size_t length_as_bytes = recv_lengths.at(i); auto raw = s_recvbuf.subspan(offset, length_as_bytes); auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()), length_as_bytes / sizeof(SketchEntry)); allworkers.emplace_back(sketch); offset += length_as_bytes; } // Merge them into a new sketch. 
SketchContainer new_sketch(this->feature_types_, num_bins_, this->num_columns_, global_sum_rows, this->device_); for (size_t i = 0; i < allworkers.size(); ++i) { auto worker = allworkers[i]; auto worker_ptr = dh::ToSpan(gathered_ptrs) .subspan(i * d_columns_ptr.size(), d_columns_ptr.size()); new_sketch.Merge(worker_ptr, worker); new_sketch.FixError(); } *this = std::move(new_sketch); timer_.Stop(__func__); } namespace { struct InvalidCatOp { Span<SketchEntry const> values; Span<size_t const> ptrs; Span<FeatureType const> ft; XGBOOST_DEVICE bool operator()(size_t i) const { auto fidx = dh::SegmentId(ptrs, i); return IsCat(ft, fidx) && InvalidCat(values[i].value); } }; } // anonymous namespace void SketchContainer::MakeCuts(HistogramCuts* p_cuts) { timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); p_cuts->min_vals_.Resize(num_columns_); // Sync between workers. this->AllReduce(); // Prune to final number of bins. this->Prune(num_bins_ + 1); this->FixError(); // Set up inputs auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); p_cuts->min_vals_.SetDevice(device_); auto d_min_values = p_cuts->min_vals_.DeviceSpan(); auto const in_cut_values = dh::ToSpan(this->Current()); // Set up output ptr p_cuts->cut_ptrs_.SetDevice(device_); auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector(); h_out_columns_ptr.clear(); h_out_columns_ptr.push_back(0); auto const& h_feature_types = this->feature_types_.ConstHostSpan(); auto d_ft = feature_types_.ConstDeviceSpan(); std::vector<SketchEntry> max_values; float max_cat{-1.f}; if (has_categorical_) { dh::XGBCachingDeviceAllocator<char> alloc; auto key_it = dh::MakeTransformIterator<bst_feature_t>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) -> bst_feature_t { return dh::SegmentId(d_in_columns_ptr, i); }); auto invalid_op = InvalidCatOp{in_cut_values, d_in_columns_ptr, d_ft}; auto val_it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] XGBOOST_DEVICE(size_t i) { auto fidx = dh::SegmentId(d_in_columns_ptr, i); auto v = in_cut_values[i]; if (IsCat(d_ft, fidx)) { if (invalid_op(i)) { // use inf to indicate invalid value, this way we can keep it as in // indicator in the reduce operation as it's always the greatest value. v.value = std::numeric_limits<float>::infinity(); } } return v; }); CHECK_EQ(num_columns_, d_in_columns_ptr.size() - 1); max_values.resize(d_in_columns_ptr.size() - 1); dh::caching_device_vector<SketchEntry> d_max_values(d_in_columns_ptr.size() - 1); thrust::reduce_by_key(thrust::cuda::par(alloc), key_it, key_it + in_cut_values.size(), val_it, thrust::make_discard_iterator(), d_max_values.begin(), thrust::equal_to<bst_feature_t>{}, [] __device__(auto l, auto r) { return l.value > r.value ? l : r; }); dh::CopyDeviceSpanToVector(&max_values, dh::ToSpan(d_max_values)); auto max_it = common::MakeIndexTransformIter([&](auto i) { if (IsCat(h_feature_types, i)) { return max_values[i].value; } return -1.f; }); max_cat = *std::max_element(max_it, max_it + max_values.size()); if (std::isinf(max_cat)) { InvalidCategory(); } } // Set up output cuts for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t column_size = std::max(static_cast<size_t>(1ul), this->Column(i).size()); if (IsCat(h_feature_types, i)) { // column_size is the number of unique values in that feature. CheckMaxCat(max_values[i].value, column_size); h_out_columns_ptr.push_back(max_values[i].value + 1); // includes both max_cat and 0. 
} else { h_out_columns_ptr.push_back( std::min(static_cast<size_t>(column_size), static_cast<size_t>(num_bins_))); } } std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(), h_out_columns_ptr.begin()); auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan(); size_t total_bins = h_out_columns_ptr.back(); p_cuts->cut_values_.SetDevice(device_); p_cuts->cut_values_.Resize(total_bins); auto out_cut_values = p_cuts->cut_values_.DeviceSpan(); dh::LaunchN(total_bins, [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_out_columns_ptr, idx); auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id], d_in_columns_ptr[column_id + 1] - d_in_columns_ptr[column_id]); auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id], d_out_columns_ptr[column_id + 1] - d_out_columns_ptr[column_id]); idx -= d_out_columns_ptr[column_id]; if (in_column.size() == 0) { // If the column is empty, we push a dummy value. It won't affect training as the // column is empty, trees cannot split on it. This is just to be consistent with // rest of the library. if (idx == 0) { d_min_values[column_id] = kRtEps; out_column[0] = kRtEps; assert(out_column.size() == 1); } return; } if (idx == 0 && !IsCat(d_ft, column_id)) { auto mval = in_column[idx].value; d_min_values[column_id] = mval - (fabs(mval) + 1e-5); } if (IsCat(d_ft, column_id)) { out_column[idx] = idx; return; } // Last thread is responsible for setting a value that's greater than other cuts. if (idx == out_column.size() - 1) { const bst_float cpt = in_column.back().value; // this must be bigger than last value in a scale const bst_float last = cpt + (fabs(cpt) + 1e-5); out_column[idx] = last; return; } assert(idx+1 < in_column.size()); out_column[idx] = in_column[idx+1].value; }); p_cuts->SetCategorical(this->has_categorical_, max_cat); timer_.Stop(__func__); } } // namespace common } // namespace xgboost
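Both copies of this file lean on dh::SegmentId(ptr, idx) to map a flat element index back to its owning column, given CSC-style offsets such as columns_ptr_. The helper itself lives in device_helpers.cuh; the host-side sketch below only illustrates the upper_bound-minus-one behaviour the kernels rely on, and the names are illustrative rather than taken from the sources.

#include <algorithm>
#include <cstddef>
#include <vector>

// Owning segment of idx = index of the last offset that is <= idx.
std::size_t SegmentIdHost(const std::vector<std::size_t>& ptr, std::size_t idx) {
  auto it = std::upper_bound(ptr.begin(), ptr.end(), idx);
  return static_cast<std::size_t>(it - ptr.begin()) - 1;
}

int main() {
  std::vector<std::size_t> ptr{0, 3, 3, 7};  // column 1 is empty
  // idx 0..2 fall in column 0, idx 3..6 in column 2; the empty column is never
  // returned, which is why the merge kernel can assert on non-empty columns.
  return SegmentIdHost(ptr, 4) == 2 ? 0 : 1;
}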
d93f2cc814506674e5a0e4e2553d0c87528605f1.hip
// !!! This is a file automatically generated by hipify!!! /* Author: Anand Madhavan */ #include <sys/types.h> #include <dirent.h> #include <errno.h> #include <vector> #include <string> #include <iostream> #include <fstream> #include "utils.hh" #include "GPU.hh" #include <vector> #include <string> #include <set> #include "rocblas.h" #include <sstream> #include "Matrix.hh" #include "l1ls_coord_descent.hh" #include "proj_grad_descent.h" using namespace std; bool g_verbose; void print_usage(const char** argv) { std::cout << "Usage: " << argv[0] << "-runwhat=<natural|digits|basis|coeffs|findcoeffs> "<< "-mb=<batch size> -nb=<number of batches> " << "-n=<# basis vectors> -k=<num dims> -nepoch=<iterations> -imagesdir=<images dir>" << "-filename=<coeffs/digits file> -basisdir=<basis output directory> " << "-beta=<beta> -sigma=<sigma> -eta=<eta> " << "-nbasis_iters=<number of iterations for basis computation> " << "-basisfile=<basis file for coefficient computation> " << "-labelfile= < label file for letters> " << std::endl; } struct Options { Options():device(0),sigma(1),c(1),nepoch(1), nbasis_iters(15),eta(0.01),beta(0.4),tol(1e-1), filename(0),runwhat(0),basisfile(0),labelfile(0),numtrain(100){ } ~Options(){} int device; bool verbose; bool run_tests; char *filename, *runwhat, *basisfile, *labelfile; char* imagesdir, *basisdir; int k; int mb,nb; // m for a batch, nb batchces int n, nepoch; float sigma, c; float eta, beta, tol; int nbasis_iters, numtrain; }; bool read_data_coeff(const std::string& fname, float& gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Stats stats; // read file small_file.txt // read matrices FILE* inf = fopen(fname.c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } fscanf(inf,"gamma %g\n",&gamma); // std::cerr << "gamma " << gamma << std::endl; if(!read_matrix(inf,A,"A",false)) { cout << "Error reading matrix A\n"; return false; } // DEBUGIFY(print_matrix(A,"A")); if(!read_matrix(inf,Y,"Y",false)) { cout << "Error reading matrix Y\n"; return false; } // DEBUGIFY(print_matrix(Y,"Y")); if(!read_matrix(inf,Xinit,"Xinit",false)) { cout << "Error reading matrix Xinit\n"; return false; } // DEBUGIFY(print_matrix(Xinit,"Xinit")); if(!read_matrix(inf,Xout,"Xout",false)) { cout << "Error reading matrix Xout\n"; return false; } // DEBUGIFY(print_matrix(Xout,"Xout")); fclose(inf); return true; } bool test_cpu(float gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Matrix c_xout; float expt_time=0; { cpu::CpuEventTimer timer(expt_time); l1ls_coord_descent(c_xout, gamma, A, Y, Xinit); } std::cerr<<"\nAvg error in xout: " << avgdiff(c_xout,Xout) << ", cpu time: " << expt_time << "[ms]\n"; freeup(c_xout); return true; } bool test_gpu(float gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Matrix c_xout; float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(c_xout, gamma, A, Y); } std::cerr<<"\nAvg error in xout: " << avgdiff(c_xout,Xout) << ", gpu time: " << expt_time << "[ms]\n"; freeup(c_xout); return true; } bool test(bool cpu) { std::string fname("data.txt"); float gamma; Matrix A, Y, Xinit, Xout; if(!read_data_coeff(fname,gamma,A,Y,Xinit,Xout)) { std::cerr << "Error reading matrices\n"; return false; } int k = num_rows(Y); int m = num_cols(Y); int n = num_cols(A); printf("k=%d, m=%d, n=%d\n",k,m,n); bool ret_val; if(cpu) ret_val = test_cpu(gamma,A,Y,Xinit,Xout); else ret_val = test_gpu(gamma,A,Y,Xinit,Xout); freeup(A); freeup(Y); freeup(Xinit); freeup(Xout); return ret_val; } void run_expt(const 
Options& opts) { std::string fname(opts.filename?opts.filename:"data.txt"); float gamma; Matrix A, Y, Xinit, Xout; if(!read_data_coeff(fname,gamma,A,Y,Xinit,Xout)) { std::cerr << "Error reading matrices\n"; return; } int k = num_rows(Y); int m = num_cols(Y); int n = num_cols(A); Matrix c_xout; float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(c_xout, gamma, A, Y); } printf("gamma: %.5f\n",gamma); printf("k: %d\nm: %d\nn: %d\n",k,m,n); printf("avg_error_in_dd_cuda: %f\n", avgdiff(c_xout,Xout)); printf("dd_cuda_time: %f\n",expt_time/1000.0); freeup(c_xout); freeup(A); freeup(Y); freeup(Xinit); freeup(Xout); return; } bool run(const Options& opts, Stats& stats) { DEBUGIFY(std::cerr << "\nRunning\n";); if(opts.device<0) { cpu::CpuEventTimer timer(stats.total_time); test(true); } else { cpu::CpuEventTimer timer(stats.total_time); } return true; } void read_options(int argc, const char** argv, Options& opts) { if(cutCheckCmdLineFlag(argc, argv, "help")) { print_usage(argv); exit(1); } if(cutCheckCmdLineFlag(argc,argv,"filename")) cutGetCmdLineArgumentstr(argc, argv, "filename", &(opts.filename)); if(cutCheckCmdLineFlag(argc,argv,"imagesdir")) cutGetCmdLineArgumentstr(argc, argv, "imagesdir", &(opts.imagesdir)); if(cutCheckCmdLineFlag(argc,argv,"basisdir")) cutGetCmdLineArgumentstr(argc, argv, "basisdir", &(opts.basisdir)); if(cutCheckCmdLineFlag(argc,argv,"runwhat")) cutGetCmdLineArgumentstr(argc, argv, "runwhat", &(opts.runwhat)); if(cutCheckCmdLineFlag(argc,argv,"basisfile")) cutGetCmdLineArgumentstr(argc, argv, "basisfile", &(opts.basisfile)); if(cutCheckCmdLineFlag(argc,argv,"labelfile")) cutGetCmdLineArgumentstr(argc, argv, "labelfile", &(opts.labelfile)); #define GET_CMD_LINE_ARG_I(a) cutGetCmdLineArgumenti(argc, argv, #a, &(opts.a)); GET_CMD_LINE_ARG_I(k); GET_CMD_LINE_ARG_I(mb); GET_CMD_LINE_ARG_I(n); GET_CMD_LINE_ARG_I(nb); GET_CMD_LINE_ARG_I(nepoch); GET_CMD_LINE_ARG_I(numtrain); GET_CMD_LINE_ARG_I(device); GET_CMD_LINE_ARG_I(nbasis_iters); #define GET_CMD_LINE_ARG_F(a) cutGetCmdLineArgumentf(argc, argv, #a, &(opts.a)); GET_CMD_LINE_ARG_F(sigma); GET_CMD_LINE_ARG_F(c); GET_CMD_LINE_ARG_F(eta); GET_CMD_LINE_ARG_F(beta); GET_CMD_LINE_ARG_F(tol); #define GET_CMD_LINE_FLAG(a) if(cutCheckCmdLineFlag(argc, argv, #a)) (opts.a) = true; GET_CMD_LINE_FLAG(verbose); g_verbose = opts.verbose; } int main_coeffs(int argc, const char** argv) { float expt_time=0; { cpu::CpuEventTimer timer(expt_time); Options opts; read_options(argc, argv, opts); std::string device_name = gpu::initialize_device(opts.device); run_expt(opts); } return 0; } bool read_data_basis(const std::string& fname, float& c, float& sigma, Matrix& Binit, Matrix& X, Matrix& S, Matrix& Bout) { // read matrices FILE* inf = fopen(fname.c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } fscanf(inf,"c %g\n",&c); std::cerr << "c " << c << std::endl; fscanf(inf,"sigma %g\n",&sigma); std::cerr << "sigma " << sigma << std::endl; if(!read_matrix(inf,Binit,"Binit",false)) { cout << "Error reading matrix Binit\n"; return false; } if(!read_matrix(inf,X,"X",false)) { cout << "Error reading matrix X\n"; return false; } if(!read_matrix(inf,S,"S",false)) { cout << "Error reading matrix S\n"; return false; } if(!read_matrix(inf,Bout,"Bout",false)) { cout << "Error reading matrix Bout\n"; return false; } fclose(inf); return true; } void run_expt_basis(const Options& opts) { std::string fname(opts.filename?opts.filename:"data.txt"); Matrix Binit; // A Matrix X; // Y Matrix S; // X dimensions 
Matrix Bout; // B float c, sigma; if(!read_data_basis(fname,c,sigma,Binit,X,S,Bout)) { std::cerr << "Error reading matrices\n"; return; } int k = num_rows(X); // A is B int m = num_cols(X); // X is S int n = num_cols(Binit); // Y is X Matrix c_bout; init(c_bout,k,n,false); float expt_time=0; { gpu::GpuEventTimer timer(expt_time); proj_grad_descent_cu(c_bout, c, sigma, opts.eta, opts.beta, opts.tol, opts.nbasis_iters, Binit, X, S); } printf("c: %.5f\n",c); printf("sigma: %.5f\n",sigma); printf("k: %d\nm: %d\nn: %d\n",k,m,n); printf("avg_error_in_basis: %f\n", avgdiff(c_bout,Bout)); printf("cublas_time: %f\n",expt_time/1000.0); freeup(c_bout); freeup(Binit); freeup(X); freeup(S); freeup(Bout); return; } int main_basis(int argc, const char** argv) { float expt_time=0; { cpu::CpuEventTimer timer(expt_time); Options opts; read_options(argc, argv, opts); std::string device_name = gpu::initialize_device(opts.device); run_expt_basis(opts); } return 0; } int read_images(string dirname, vector<Matrix*>& imgs) { // cerr << "Reading images\n"; // store in 62 Matrix objects vector<string> img_files; DIR *dir; struct dirent *dirp; if((dir = opendir(dirname.c_str())) == NULL) { cout << "Error(" << errno << ") opening directory: " << dirname<< endl; exit(-1); } while ((dirp = readdir(dir)) != NULL) { string dirpname = string(dirp->d_name); if(dirpname==".") continue; if(dirpname=="..") continue; if(dirpname.find("IMAGES")==string::npos) { cout << "Skipping file: " << dirpname << endl; continue; } string tmp = dirname; tmp.append("/"); tmp.append(dirpname); img_files.push_back(tmp); } closedir(dir); int ret_val=0; for(int i=0;i<img_files.size();i++) { // cerr << img_files[i]<<endl; Matrix* m = new Matrix(); FILE* inf = fopen(img_files[i].c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } read_matrix(inf,*m,"",false); // print_matrix(*m,"Image"); ret_val = num_rows(*m); imgs.push_back(m); fclose(inf); } // cerr << imgs.size() <<" images read\n"; // cerr << "Done reading images\n"; return ret_val; } bool write_basis(const Options& opts, string tag, Matrix& B, int iepoch) { stringstream tmpstr; tmpstr << opts.basisdir; tmpstr << "/basis_" << tag << iepoch << ".txt"; FILE* ouf = fopen(tmpstr.str().c_str(),"wt"); if(ouf==0) { cerr << "Cannot open file "<< tmpstr.str(); return false; } // cerr << "B num rows: " << num_rows(B); // cerr << "B num cols: " << num_cols(B); for(int i=0;i<num_rows(B);++i) { for(int j=0;j<num_cols(B);++j) { fprintf(ouf,"%g ",get_val(B,i,j)); } fprintf(ouf,"\n"); } fclose(ouf); return true; } void get_images_as_input_matrix(Matrix& bigX, const Options& opts) { int k = opts.k; int m = opts.mb; // 1) read all images into memory... vector<Matrix*> imgs; int img_size = read_images(opts.imagesdir,imgs); // 2) arrange images into X matrix: in random patches ... int dim = int(sqrt(opts.k)); int nbatches = opts.nb; // first pick random image, then pick random patch in it // repeat this for 1000 images // repeat this 100 times or so int buffer = dim; // Initialize B and S to random values init(bigX, k, m*nbatches, false); for(size_t iim=0;iim< m*nbatches; ++iim) { // pick random image... 
int img_index = rand()%imgs.size(); // in the range 0 to imgs.size() // now pick in the range between buffer and img_size-buffer-dim const Matrix& img = *(imgs[img_index]); int upper = img_size-buffer-dim; int lower = buffer; int rr_pos = rand()%(upper-lower)+lower; int rc_pos = rand()%(upper-lower)+lower; int r_in_x=0; for(size_t c=0;c<dim;++c) { for(size_t r=0;r<dim;r++) { set_val(bigX,r_in_x++,iim,get_val(img,rr_pos+r,rc_pos+c)); } } } for(size_t i=0;i<imgs.size();++i) { freeup(*imgs[i]); delete imgs[i]; } } void run_expt_together(const Matrix& bigX, const Options& opts) { int k = opts.k; int m = opts.mb; int n = opts.n; int nbatches = opts.nb; cerr << "basis_iters: " << opts.nbasis_iters << endl; cerr << "eta: " << opts.eta << endl; cerr << "beta: " << opts.beta << endl; cerr << "sigma: " << opts.sigma << endl; cerr << "k: " << k << endl; cerr << "m: " << m << endl; cerr << "n: " << n << endl; cerr << "nb: " << nbatches << endl; hipblasInit(); gpu::checkErrors(); float gamma = 2*opts.sigma*opts.sigma*opts.beta; cerr << "gamma: " << gamma << endl; float *B_on_dev, *BtB_on_dev, *X_on_dev, *XtB_on_dev, *S_on_dev; onetime_setup(k,m,n,gamma, &B_on_dev, &BtB_on_dev, &X_on_dev, &XtB_on_dev, &S_on_dev); gpu::checkErrors(); Matrix B; init(B, k, n, false); for(int i=0;i<k;i++) { for(int j=0;j<n;j++) { set_val(B,i,j,((float)rand()/(float)RAND_MAX)-0.5); } } for(int j=0;j<n;j++) { float col_avg = 0; for(int i=0;i<k;i++) { col_avg += get_val(B,i,j); } col_avg /= (float)(k); for(int i=0;i<k;i++) { set_val(B,i,j,get_val(B,i,j)-col_avg); } float col_norm = 0; for(int i=0;i<k;i++) { col_norm += get_val(B,i,j)*get_val(B,i,j); } col_norm = sqrt(col_norm); for(int i=0;i<k;i++) { set_val(B,i,j,get_val(B,i,j)/col_norm); } } cutilSafeCall(hipMemcpy(B_on_dev,B.values,k*n*sizeof(float),hipMemcpyHostToDevice)); gpu::checkErrors(); float *SSt2_on_dev, *XSt2_on_dev, *G_on_dev, *X_BS_on_dev; onetime_setup_pg(k, m, n, &SSt2_on_dev,&XSt2_on_dev,&G_on_dev,&X_BS_on_dev); gpu::checkErrors(); gpu::checkCublasError(); std::vector<Matrix*> ss; cerr << "iepoch, fobj, coeffs_time, basis_time, avg_nnz\n"; for(int iepoch=0;iepoch<opts.nepoch; iepoch++) { float fobj=0; float coeff_time=0, basis_time=0; int nonzeros = 0; for(int ibatch=0;ibatch<nbatches;++ibatch) { Matrix X; X.row_contiguous = false; X.num_ptrs = m; X.num_vals = k; X.values = bigX.values+(ibatch*m*k); Matrix* S=0; if(ss.size()<(ibatch+1)) { S = new Matrix(); init(*S, n, m, false); ss.push_back(S); for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { set_val(*S,i,j,0); } } } else { // Reusing S for batch... 
S = ss[ibatch]; } // Repeat until convergence of original cost function: // load S, X cutilSafeCall(hipMemcpy(S_on_dev,S->values, n*m*sizeof(float),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(X_on_dev,X.values, k*m*sizeof(float),hipMemcpyHostToDevice)); gpu::checkErrors(); gpu::checkCublasError(); // 3) solve for coefficients using fixed B float l1ls_time=0; { gpu::GpuEventTimer timer(l1ls_time); l1ls_coord_descent_cu_basic(k, m, n, B_on_dev, BtB_on_dev, X_on_dev, XtB_on_dev, S_on_dev); gpu::checkErrors(); gpu::checkCublasError(); } coeff_time +=l1ls_time; cutilSafeCall(hipMemcpy((void *)(S->values),(const void *)(S_on_dev), n*m*sizeof(float),hipMemcpyDeviceToHost)); gpu::checkErrors(); gpu::checkCublasError(); fobj += calc_objective(k,m,n,opts.sigma,opts.beta,B_on_dev,S_on_dev,X_on_dev,X_BS_on_dev); // 4) solve for basis using fixed S float b_time=0; { gpu::GpuEventTimer timer(b_time); float tmp = proj_grad_descent_cu_basic (opts.c,opts.sigma, opts.eta, opts.beta, opts.nbasis_iters, k,m,n, B_on_dev,X_on_dev, S_on_dev,SSt2_on_dev,XSt2_on_dev,G_on_dev,X_BS_on_dev); gpu::checkErrors(); gpu::checkCublasError(); } basis_time +=b_time; nonzeros += nnz(*S); } cerr << iepoch+1 << ", " << fobj/((float)(nbatches*m)) << ", " << coeff_time/1000.0 << ", " << basis_time/1000.0 << ", " << (float)nonzeros/(float)(nbatches*m*n) << endl; cutilSafeCall(hipMemcpy((void *)(B.values),(const void *)(B_on_dev), k*n*sizeof(float),hipMemcpyDeviceToHost)); write_basis(opts,"",B,iepoch); } cutilSafeCall(hipMemcpy((void *)(B.values),(const void *)(B_on_dev), k*n*sizeof(float),hipMemcpyDeviceToHost)); onetime_teardown(B_on_dev, BtB_on_dev, X_on_dev, XtB_on_dev, S_on_dev); onetime_teardown_pg(SSt2_on_dev, XSt2_on_dev, G_on_dev, X_BS_on_dev); hipblasShutdown(); for(size_t i=0;i<ss.size();++i) { freeup(*ss[i]); delete ss[i]; } } void run_expt_on_natural_images(const Options& opts) { cerr << "Getting basis from natural images\n"; Matrix bigX; get_images_as_input_matrix(bigX,opts); run_expt_together(bigX,opts); freeup(bigX); } void get_digits_as_input_matrix(Matrix& bigX, const Options& opts) { cerr << "Reading " << opts.filename << endl; FILE* inf = fopen(opts.filename,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,196,60000,bigX,false); } void run_expt_on_digits(const Options& opts) { cerr << "Getting basis from digits\n"; Matrix bigX; get_digits_as_input_matrix(bigX,opts); run_expt_together(bigX,opts); freeup(bigX); } void get_letters_as_input_matrix(Matrix& bigX, const Options& opts) { cerr << "Reading " << opts.filename << endl; FILE* inf = fopen(opts.filename,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,196,52152,bigX,false); } void get_letters_labels(Matrix& labels, const Options& opts) { cerr << "Reading letters label file: " << opts.labelfile << endl; if(opts.labelfile==0) { cerr << "Specify letters label file\n"; return; } FILE* inf = fopen(opts.labelfile,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,52152,1,labels,false); } void read_basis(Matrix& B, const Options& opts) { cerr << "Reading basis file: " << opts.basisfile << endl; if(opts.basisfile==0) { cerr << "Please specify basis file for coefficient computation\n"; return; } FILE* inf = fopen(opts.basisfile,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,opts.k,opts.n,B,false); } void write_svm_light_data(string filename, const Matrix& X, const Matrix& labels) { // write in svmlight recognizable format: FILE* inf = 
fopen(filename.c_str(),"w"); for(size_t ic=0; ic<num_cols(X); ic++) { // printf("Label: %g\n",get_val(labels,ic,0)+1); fprintf(inf,"%g",get_val(labels,ic,0)+1); for(size_t ir=0;ir<num_rows(X); ir++) { float val = get_val(X,ir,ic); if(val>1e-14) { // svmlight wants >=1 feature values. fprintf(inf," %d:%g",ir+1,val); } } fprintf(inf,"\n"); } fclose(inf); } void write_test_train_data(const Matrix& testX, const Matrix& testLabels, const Matrix& trainX,const Matrix& trainLabels, const Options& opts) { cerr << "Writing train.dat\n"; write_svm_light_data("train.dat",trainX,trainLabels); cerr << "Writing test.dat\n"; write_svm_light_data("test.dat",testX,testLabels); } int randint(int l, int u) { return l + rand()%(u-l+1); } void partition_into_train_test(Matrix& trainX, Matrix& trainLabels, Matrix& testX, Matrix& testLabels, const Matrix& inp, const Matrix& inpl, const Options& opts) { // cerr << "RANDMAX: " << RAND_MAX << endl; // go through matrix and pick randomly 100 columns... int numtrain=opts.numtrain; int m = num_cols(inp); cerr << "Paritionning into " << numtrain << " train and " << m-numtrain << " test\n"; int k = num_rows(inp); set<int> traincols; while(traincols.size()<numtrain) { int random = randint(0,m-1); traincols.insert(random); } init(trainX,k,numtrain,false); init(testX,k,m-numtrain,false); init(trainLabels,numtrain,1,false); init(testLabels,m-numtrain,1,false); int ictrain=0, ictest=0; for(size_t ic=0; ic<m; ++ic) { if(traincols.find(ic)!=traincols.end()) { for(size_t ir=0; ir<k; ++ir) { set_val(trainX,ir,ictrain,get_val(inp,ir,ic)); } set_val(trainLabels,ictrain,0,get_val(inpl,ic,0)); ictrain++; } else { for(size_t ir=0; ir<k; ++ir) { float val = get_val(inp,ir,ic); set_val(testX,ir,ictest,val); } set_val(testLabels,ictest,0,get_val(inpl,ic,0)); ictest++; } } } void find_coeffs(Matrix& S, const Matrix& X, const Matrix& B, const Options& opts) { cerr << "Finding coefficients for given inputs\n"; int m = num_cols(X); int n = num_cols(B); int k = num_rows(X); float gamma = 2*opts.sigma*opts.sigma*opts.beta; cerr << "k: " << k << endl; cerr << "m: " << m << endl; cerr << "n: " << n << endl; cerr << "gamma: " << gamma << endl; init(S,n,m,false); std::string device_name = gpu::initialize_device(opts.device); float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(S, gamma, B, X); gpu::checkErrors(); } } void find_coeffs_for_letters(const Options& opts) { cerr << "Getting coeffs for letters\n"; Matrix inp, inpl; get_letters_as_input_matrix(inp,opts); get_letters_labels(inpl, opts); Matrix testX, trainX, testLabels, trainLabels; partition_into_train_test(trainX,trainLabels,testX,testLabels,inp,inpl,opts); Matrix B,S; read_basis(B,opts); find_coeffs(S,trainX,B,opts); write_test_train_data(testX,testLabels,trainX,trainLabels,opts); freeup(testX); freeup(trainX); freeup(inp); freeup(testLabels); freeup(trainLabels); freeup(inpl); freeup(B); freeup(S); } int main(int argc, const char** argv) { Options opts; read_options(argc,argv,opts); if(opts.runwhat==0) { cerr << "Run what?\n"; print_usage(argv); exit(1); } if(strcmp(opts.runwhat,"coeffs")==0) { cerr << "Just coeffs...\n"; main_coeffs(argc,argv); return 0; } else if(strcmp(opts.runwhat,"natural")==0) { cerr << "On natural images...\n"; if(opts.imagesdir==0) { print_usage(argv); std::cerr << "Specify input images directory\n"; return 0; } if(opts.basisdir==0) { print_usage(argv); std::cerr << "Specify output basis directory\n"; return 0; } cerr << "Writing to basis dir: " << opts.basisdir << endl; std::string 
device_name = gpu::initialize_device(opts.device); cerr << "Initialized device: " << device_name<<endl; run_expt_on_natural_images(opts); return 0; } else if(strcmp(opts.runwhat,"digits")==0) { cerr << "On digits...\n"; if(opts.filename==0) { print_usage(argv); std::cerr << "Cannot open file\n"; return 0; } if(opts.basisdir==0) { print_usage(argv); std::cerr << "Specify output basis directory\n"; return 0; } cerr << "Writing to basis dir: " << opts.basisdir << endl; run_expt_on_digits(opts); return 0; } else if(strcmp(opts.runwhat,"basis")==0) { cerr << "Just basis...\n"; main_basis(argc,argv); } else if(strcmp(opts.runwhat,"findcoeffs")==0) { cerr << "Finding coefficients\n"; find_coeffs_for_letters(opts); return 0; } return 0; }
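The .hip listing above is the hipify-generated counterpart of the .cu file that follows. As a hedged orientation aid (not code from this corpus; the kernel name `scale`, the helper `scale_on_device`, and the launch configuration are invented for the example), a minimal sketch of the CUDA-to-HIP source mapping these file pairs exhibit:

#include <hip/hip_runtime.h>

__global__ void scale(float* v, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // device code is unchanged by hipify
    if (i < n) v[i] *= a;
}

void scale_on_device(float* host, int n) {
    float* dev = 0;
    hipMalloc((void**)&dev, n * sizeof(float));                        // CUDA: cudaMalloc
    hipMemcpy(dev, host, n * sizeof(float), hipMemcpyHostToDevice);    // CUDA: cudaMemcpy(..., cudaMemcpyHostToDevice)
    // CUDA launch syntax: scale<<<(n + 255) / 256, 256>>>(dev, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, dev, 2.0f, n);
    hipMemcpy(host, dev, n * sizeof(float), hipMemcpyDeviceToHost);    // CUDA: cudaMemcpyDeviceToHost
    hipFree(dev);                                                      // CUDA: cudaFree
}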
d93f2cc814506674e5a0e4e2553d0c87528605f1.cu
/* Author: Anand Madhavan */ #include <sys/types.h> #include <dirent.h> #include <errno.h> #include <vector> #include <string> #include <iostream> #include <fstream> #include "utils.hh" #include "GPU.hh" #include <vector> #include <string> #include <set> #include "cublas.h" #include <sstream> #include "Matrix.hh" #include "l1ls_coord_descent.hh" #include "proj_grad_descent.h" using namespace std; bool g_verbose; void print_usage(const char** argv) { std::cout << "Usage: " << argv[0] << "-runwhat=<natural|digits|basis|coeffs|findcoeffs> "<< "-mb=<batch size> -nb=<number of batches> " << "-n=<# basis vectors> -k=<num dims> -nepoch=<iterations> -imagesdir=<images dir>" << "-filename=<coeffs/digits file> -basisdir=<basis output directory> " << "-beta=<beta> -sigma=<sigma> -eta=<eta> " << "-nbasis_iters=<number of iterations for basis computation> " << "-basisfile=<basis file for coefficient computation> " << "-labelfile= < label file for letters> " << std::endl; } struct Options { Options():device(0),sigma(1),c(1),nepoch(1), nbasis_iters(15),eta(0.01),beta(0.4),tol(1e-1), filename(0),runwhat(0),basisfile(0),labelfile(0),numtrain(100){ } ~Options(){} int device; bool verbose; bool run_tests; char *filename, *runwhat, *basisfile, *labelfile; char* imagesdir, *basisdir; int k; int mb,nb; // m for a batch, nb batchces int n, nepoch; float sigma, c; float eta, beta, tol; int nbasis_iters, numtrain; }; bool read_data_coeff(const std::string& fname, float& gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Stats stats; // read file small_file.txt // read matrices FILE* inf = fopen(fname.c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } fscanf(inf,"gamma %g\n",&gamma); // std::cerr << "gamma " << gamma << std::endl; if(!read_matrix(inf,A,"A",false)) { cout << "Error reading matrix A\n"; return false; } // DEBUGIFY(print_matrix(A,"A")); if(!read_matrix(inf,Y,"Y",false)) { cout << "Error reading matrix Y\n"; return false; } // DEBUGIFY(print_matrix(Y,"Y")); if(!read_matrix(inf,Xinit,"Xinit",false)) { cout << "Error reading matrix Xinit\n"; return false; } // DEBUGIFY(print_matrix(Xinit,"Xinit")); if(!read_matrix(inf,Xout,"Xout",false)) { cout << "Error reading matrix Xout\n"; return false; } // DEBUGIFY(print_matrix(Xout,"Xout")); fclose(inf); return true; } bool test_cpu(float gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Matrix c_xout; float expt_time=0; { cpu::CpuEventTimer timer(expt_time); l1ls_coord_descent(c_xout, gamma, A, Y, Xinit); } std::cerr<<"\nAvg error in xout: " << avgdiff(c_xout,Xout) << ", cpu time: " << expt_time << "[ms]\n"; freeup(c_xout); return true; } bool test_gpu(float gamma, Matrix& A, Matrix& Y, Matrix& Xinit, Matrix& Xout) { Matrix c_xout; float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(c_xout, gamma, A, Y); } std::cerr<<"\nAvg error in xout: " << avgdiff(c_xout,Xout) << ", gpu time: " << expt_time << "[ms]\n"; freeup(c_xout); return true; } bool test(bool cpu) { std::string fname("data.txt"); float gamma; Matrix A, Y, Xinit, Xout; if(!read_data_coeff(fname,gamma,A,Y,Xinit,Xout)) { std::cerr << "Error reading matrices\n"; return false; } int k = num_rows(Y); int m = num_cols(Y); int n = num_cols(A); printf("k=%d, m=%d, n=%d\n",k,m,n); bool ret_val; if(cpu) ret_val = test_cpu(gamma,A,Y,Xinit,Xout); else ret_val = test_gpu(gamma,A,Y,Xinit,Xout); freeup(A); freeup(Y); freeup(Xinit); freeup(Xout); return ret_val; } void run_expt(const Options& opts) { std::string 
fname(opts.filename?opts.filename:"data.txt"); float gamma; Matrix A, Y, Xinit, Xout; if(!read_data_coeff(fname,gamma,A,Y,Xinit,Xout)) { std::cerr << "Error reading matrices\n"; return; } int k = num_rows(Y); int m = num_cols(Y); int n = num_cols(A); Matrix c_xout; float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(c_xout, gamma, A, Y); } printf("gamma: %.5f\n",gamma); printf("k: %d\nm: %d\nn: %d\n",k,m,n); printf("avg_error_in_dd_cuda: %f\n", avgdiff(c_xout,Xout)); printf("dd_cuda_time: %f\n",expt_time/1000.0); freeup(c_xout); freeup(A); freeup(Y); freeup(Xinit); freeup(Xout); return; } bool run(const Options& opts, Stats& stats) { DEBUGIFY(std::cerr << "\nRunning\n";); if(opts.device<0) { cpu::CpuEventTimer timer(stats.total_time); test(true); } else { cpu::CpuEventTimer timer(stats.total_time); } return true; } void read_options(int argc, const char** argv, Options& opts) { if(cutCheckCmdLineFlag(argc, argv, "help")) { print_usage(argv); exit(1); } if(cutCheckCmdLineFlag(argc,argv,"filename")) cutGetCmdLineArgumentstr(argc, argv, "filename", &(opts.filename)); if(cutCheckCmdLineFlag(argc,argv,"imagesdir")) cutGetCmdLineArgumentstr(argc, argv, "imagesdir", &(opts.imagesdir)); if(cutCheckCmdLineFlag(argc,argv,"basisdir")) cutGetCmdLineArgumentstr(argc, argv, "basisdir", &(opts.basisdir)); if(cutCheckCmdLineFlag(argc,argv,"runwhat")) cutGetCmdLineArgumentstr(argc, argv, "runwhat", &(opts.runwhat)); if(cutCheckCmdLineFlag(argc,argv,"basisfile")) cutGetCmdLineArgumentstr(argc, argv, "basisfile", &(opts.basisfile)); if(cutCheckCmdLineFlag(argc,argv,"labelfile")) cutGetCmdLineArgumentstr(argc, argv, "labelfile", &(opts.labelfile)); #define GET_CMD_LINE_ARG_I(a) cutGetCmdLineArgumenti(argc, argv, #a, &(opts.a)); GET_CMD_LINE_ARG_I(k); GET_CMD_LINE_ARG_I(mb); GET_CMD_LINE_ARG_I(n); GET_CMD_LINE_ARG_I(nb); GET_CMD_LINE_ARG_I(nepoch); GET_CMD_LINE_ARG_I(numtrain); GET_CMD_LINE_ARG_I(device); GET_CMD_LINE_ARG_I(nbasis_iters); #define GET_CMD_LINE_ARG_F(a) cutGetCmdLineArgumentf(argc, argv, #a, &(opts.a)); GET_CMD_LINE_ARG_F(sigma); GET_CMD_LINE_ARG_F(c); GET_CMD_LINE_ARG_F(eta); GET_CMD_LINE_ARG_F(beta); GET_CMD_LINE_ARG_F(tol); #define GET_CMD_LINE_FLAG(a) if(cutCheckCmdLineFlag(argc, argv, #a)) (opts.a) = true; GET_CMD_LINE_FLAG(verbose); g_verbose = opts.verbose; } int main_coeffs(int argc, const char** argv) { float expt_time=0; { cpu::CpuEventTimer timer(expt_time); Options opts; read_options(argc, argv, opts); std::string device_name = gpu::initialize_device(opts.device); run_expt(opts); } return 0; } bool read_data_basis(const std::string& fname, float& c, float& sigma, Matrix& Binit, Matrix& X, Matrix& S, Matrix& Bout) { // read matrices FILE* inf = fopen(fname.c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } fscanf(inf,"c %g\n",&c); std::cerr << "c " << c << std::endl; fscanf(inf,"sigma %g\n",&sigma); std::cerr << "sigma " << sigma << std::endl; if(!read_matrix(inf,Binit,"Binit",false)) { cout << "Error reading matrix Binit\n"; return false; } if(!read_matrix(inf,X,"X",false)) { cout << "Error reading matrix X\n"; return false; } if(!read_matrix(inf,S,"S",false)) { cout << "Error reading matrix S\n"; return false; } if(!read_matrix(inf,Bout,"Bout",false)) { cout << "Error reading matrix Bout\n"; return false; } fclose(inf); return true; } void run_expt_basis(const Options& opts) { std::string fname(opts.filename?opts.filename:"data.txt"); Matrix Binit; // A Matrix X; // Y Matrix S; // X dimensions Matrix Bout; // B float c, sigma; 
if(!read_data_basis(fname,c,sigma,Binit,X,S,Bout)) { std::cerr << "Error reading matrices\n"; return; } int k = num_rows(X); // A is B int m = num_cols(X); // X is S int n = num_cols(Binit); // Y is X Matrix c_bout; init(c_bout,k,n,false); float expt_time=0; { gpu::GpuEventTimer timer(expt_time); proj_grad_descent_cu(c_bout, c, sigma, opts.eta, opts.beta, opts.tol, opts.nbasis_iters, Binit, X, S); } printf("c: %.5f\n",c); printf("sigma: %.5f\n",sigma); printf("k: %d\nm: %d\nn: %d\n",k,m,n); printf("avg_error_in_basis: %f\n", avgdiff(c_bout,Bout)); printf("cublas_time: %f\n",expt_time/1000.0); freeup(c_bout); freeup(Binit); freeup(X); freeup(S); freeup(Bout); return; } int main_basis(int argc, const char** argv) { float expt_time=0; { cpu::CpuEventTimer timer(expt_time); Options opts; read_options(argc, argv, opts); std::string device_name = gpu::initialize_device(opts.device); run_expt_basis(opts); } return 0; } int read_images(string dirname, vector<Matrix*>& imgs) { // cerr << "Reading images\n"; // store in 62 Matrix objects vector<string> img_files; DIR *dir; struct dirent *dirp; if((dir = opendir(dirname.c_str())) == NULL) { cout << "Error(" << errno << ") opening directory: " << dirname<< endl; exit(-1); } while ((dirp = readdir(dir)) != NULL) { string dirpname = string(dirp->d_name); if(dirpname==".") continue; if(dirpname=="..") continue; if(dirpname.find("IMAGES")==string::npos) { cout << "Skipping file: " << dirpname << endl; continue; } string tmp = dirname; tmp.append("/"); tmp.append(dirpname); img_files.push_back(tmp); } closedir(dir); int ret_val=0; for(int i=0;i<img_files.size();i++) { // cerr << img_files[i]<<endl; Matrix* m = new Matrix(); FILE* inf = fopen(img_files[i].c_str(),"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return false; } read_matrix(inf,*m,"",false); // print_matrix(*m,"Image"); ret_val = num_rows(*m); imgs.push_back(m); fclose(inf); } // cerr << imgs.size() <<" images read\n"; // cerr << "Done reading images\n"; return ret_val; } bool write_basis(const Options& opts, string tag, Matrix& B, int iepoch) { stringstream tmpstr; tmpstr << opts.basisdir; tmpstr << "/basis_" << tag << iepoch << ".txt"; FILE* ouf = fopen(tmpstr.str().c_str(),"wt"); if(ouf==0) { cerr << "Cannot open file "<< tmpstr.str(); return false; } // cerr << "B num rows: " << num_rows(B); // cerr << "B num cols: " << num_cols(B); for(int i=0;i<num_rows(B);++i) { for(int j=0;j<num_cols(B);++j) { fprintf(ouf,"%g ",get_val(B,i,j)); } fprintf(ouf,"\n"); } fclose(ouf); return true; } void get_images_as_input_matrix(Matrix& bigX, const Options& opts) { int k = opts.k; int m = opts.mb; // 1) read all images into memory... vector<Matrix*> imgs; int img_size = read_images(opts.imagesdir,imgs); // 2) arrange images into X matrix: in random patches ... int dim = int(sqrt(opts.k)); int nbatches = opts.nb; // first pick random image, then pick random patch in it // repeat this for 1000 images // repeat this 100 times or so int buffer = dim; // Initialize B and S to random values init(bigX, k, m*nbatches, false); for(size_t iim=0;iim< m*nbatches; ++iim) { // pick random image... 
int img_index = rand()%imgs.size(); // in the range 0 to imgs.size() // now pick in the range between buffer and img_size-buffer-dim const Matrix& img = *(imgs[img_index]); int upper = img_size-buffer-dim; int lower = buffer; int rr_pos = rand()%(upper-lower)+lower; int rc_pos = rand()%(upper-lower)+lower; int r_in_x=0; for(size_t c=0;c<dim;++c) { for(size_t r=0;r<dim;r++) { set_val(bigX,r_in_x++,iim,get_val(img,rr_pos+r,rc_pos+c)); } } } for(size_t i=0;i<imgs.size();++i) { freeup(*imgs[i]); delete imgs[i]; } } void run_expt_together(const Matrix& bigX, const Options& opts) { int k = opts.k; int m = opts.mb; int n = opts.n; int nbatches = opts.nb; cerr << "basis_iters: " << opts.nbasis_iters << endl; cerr << "eta: " << opts.eta << endl; cerr << "beta: " << opts.beta << endl; cerr << "sigma: " << opts.sigma << endl; cerr << "k: " << k << endl; cerr << "m: " << m << endl; cerr << "n: " << n << endl; cerr << "nb: " << nbatches << endl; cublasInit(); gpu::checkErrors(); float gamma = 2*opts.sigma*opts.sigma*opts.beta; cerr << "gamma: " << gamma << endl; float *B_on_dev, *BtB_on_dev, *X_on_dev, *XtB_on_dev, *S_on_dev; onetime_setup(k,m,n,gamma, &B_on_dev, &BtB_on_dev, &X_on_dev, &XtB_on_dev, &S_on_dev); gpu::checkErrors(); Matrix B; init(B, k, n, false); for(int i=0;i<k;i++) { for(int j=0;j<n;j++) { set_val(B,i,j,((float)rand()/(float)RAND_MAX)-0.5); } } for(int j=0;j<n;j++) { float col_avg = 0; for(int i=0;i<k;i++) { col_avg += get_val(B,i,j); } col_avg /= (float)(k); for(int i=0;i<k;i++) { set_val(B,i,j,get_val(B,i,j)-col_avg); } float col_norm = 0; for(int i=0;i<k;i++) { col_norm += get_val(B,i,j)*get_val(B,i,j); } col_norm = sqrt(col_norm); for(int i=0;i<k;i++) { set_val(B,i,j,get_val(B,i,j)/col_norm); } } cutilSafeCall(cudaMemcpy(B_on_dev,B.values,k*n*sizeof(float),cudaMemcpyHostToDevice)); gpu::checkErrors(); float *SSt2_on_dev, *XSt2_on_dev, *G_on_dev, *X_BS_on_dev; onetime_setup_pg(k, m, n, &SSt2_on_dev,&XSt2_on_dev,&G_on_dev,&X_BS_on_dev); gpu::checkErrors(); gpu::checkCublasError(); std::vector<Matrix*> ss; cerr << "iepoch, fobj, coeffs_time, basis_time, avg_nnz\n"; for(int iepoch=0;iepoch<opts.nepoch; iepoch++) { float fobj=0; float coeff_time=0, basis_time=0; int nonzeros = 0; for(int ibatch=0;ibatch<nbatches;++ibatch) { Matrix X; X.row_contiguous = false; X.num_ptrs = m; X.num_vals = k; X.values = bigX.values+(ibatch*m*k); Matrix* S=0; if(ss.size()<(ibatch+1)) { S = new Matrix(); init(*S, n, m, false); ss.push_back(S); for(int i=0;i<n;i++) { for(int j=0;j<m;j++) { set_val(*S,i,j,0); } } } else { // Reusing S for batch... 
S = ss[ibatch]; } // Repeat until convergence of original cost function: // load S, X cutilSafeCall(cudaMemcpy(S_on_dev,S->values, n*m*sizeof(float),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(X_on_dev,X.values, k*m*sizeof(float),cudaMemcpyHostToDevice)); gpu::checkErrors(); gpu::checkCublasError(); // 3) solve for coefficients using fixed B float l1ls_time=0; { gpu::GpuEventTimer timer(l1ls_time); l1ls_coord_descent_cu_basic(k, m, n, B_on_dev, BtB_on_dev, X_on_dev, XtB_on_dev, S_on_dev); gpu::checkErrors(); gpu::checkCublasError(); } coeff_time +=l1ls_time; cutilSafeCall(cudaMemcpy((void *)(S->values),(const void *)(S_on_dev), n*m*sizeof(float),cudaMemcpyDeviceToHost)); gpu::checkErrors(); gpu::checkCublasError(); fobj += calc_objective(k,m,n,opts.sigma,opts.beta,B_on_dev,S_on_dev,X_on_dev,X_BS_on_dev); // 4) solve for basis using fixed S float b_time=0; { gpu::GpuEventTimer timer(b_time); float tmp = proj_grad_descent_cu_basic (opts.c,opts.sigma, opts.eta, opts.beta, opts.nbasis_iters, k,m,n, B_on_dev,X_on_dev, S_on_dev,SSt2_on_dev,XSt2_on_dev,G_on_dev,X_BS_on_dev); gpu::checkErrors(); gpu::checkCublasError(); } basis_time +=b_time; nonzeros += nnz(*S); } cerr << iepoch+1 << ", " << fobj/((float)(nbatches*m)) << ", " << coeff_time/1000.0 << ", " << basis_time/1000.0 << ", " << (float)nonzeros/(float)(nbatches*m*n) << endl; cutilSafeCall(cudaMemcpy((void *)(B.values),(const void *)(B_on_dev), k*n*sizeof(float),cudaMemcpyDeviceToHost)); write_basis(opts,"",B,iepoch); } cutilSafeCall(cudaMemcpy((void *)(B.values),(const void *)(B_on_dev), k*n*sizeof(float),cudaMemcpyDeviceToHost)); onetime_teardown(B_on_dev, BtB_on_dev, X_on_dev, XtB_on_dev, S_on_dev); onetime_teardown_pg(SSt2_on_dev, XSt2_on_dev, G_on_dev, X_BS_on_dev); cublasShutdown(); for(size_t i=0;i<ss.size();++i) { freeup(*ss[i]); delete ss[i]; } } void run_expt_on_natural_images(const Options& opts) { cerr << "Getting basis from natural images\n"; Matrix bigX; get_images_as_input_matrix(bigX,opts); run_expt_together(bigX,opts); freeup(bigX); } void get_digits_as_input_matrix(Matrix& bigX, const Options& opts) { cerr << "Reading " << opts.filename << endl; FILE* inf = fopen(opts.filename,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,196,60000,bigX,false); } void run_expt_on_digits(const Options& opts) { cerr << "Getting basis from digits\n"; Matrix bigX; get_digits_as_input_matrix(bigX,opts); run_expt_together(bigX,opts); freeup(bigX); } void get_letters_as_input_matrix(Matrix& bigX, const Options& opts) { cerr << "Reading " << opts.filename << endl; FILE* inf = fopen(opts.filename,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,196,52152,bigX,false); } void get_letters_labels(Matrix& labels, const Options& opts) { cerr << "Reading letters label file: " << opts.labelfile << endl; if(opts.labelfile==0) { cerr << "Specify letters label file\n"; return; } FILE* inf = fopen(opts.labelfile,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,52152,1,labels,false); } void read_basis(Matrix& B, const Options& opts) { cerr << "Reading basis file: " << opts.basisfile << endl; if(opts.basisfile==0) { cerr << "Please specify basis file for coefficient computation\n"; return; } FILE* inf = fopen(opts.basisfile,"rt"); if(inf==0) { std::cerr << "Cannot open file\n"; return; } read_matrix(inf,opts.k,opts.n,B,false); } void write_svm_light_data(string filename, const Matrix& X, const Matrix& labels) { // write in svmlight recognizable format: FILE* 
inf = fopen(filename.c_str(),"w"); for(size_t ic=0; ic<num_cols(X); ic++) { // printf("Label: %g\n",get_val(labels,ic,0)+1); fprintf(inf,"%g",get_val(labels,ic,0)+1); for(size_t ir=0;ir<num_rows(X); ir++) { float val = get_val(X,ir,ic); if(val>1e-14) { // svmlight wants >=1 feature values. fprintf(inf," %d:%g",ir+1,val); } } fprintf(inf,"\n"); } fclose(inf); } void write_test_train_data(const Matrix& testX, const Matrix& testLabels, const Matrix& trainX,const Matrix& trainLabels, const Options& opts) { cerr << "Writing train.dat\n"; write_svm_light_data("train.dat",trainX,trainLabels); cerr << "Writing test.dat\n"; write_svm_light_data("test.dat",testX,testLabels); } int randint(int l, int u) { return l + rand()%(u-l+1); } void partition_into_train_test(Matrix& trainX, Matrix& trainLabels, Matrix& testX, Matrix& testLabels, const Matrix& inp, const Matrix& inpl, const Options& opts) { // cerr << "RANDMAX: " << RAND_MAX << endl; // go through matrix and pick randomly 100 columns... int numtrain=opts.numtrain; int m = num_cols(inp); cerr << "Paritionning into " << numtrain << " train and " << m-numtrain << " test\n"; int k = num_rows(inp); set<int> traincols; while(traincols.size()<numtrain) { int random = randint(0,m-1); traincols.insert(random); } init(trainX,k,numtrain,false); init(testX,k,m-numtrain,false); init(trainLabels,numtrain,1,false); init(testLabels,m-numtrain,1,false); int ictrain=0, ictest=0; for(size_t ic=0; ic<m; ++ic) { if(traincols.find(ic)!=traincols.end()) { for(size_t ir=0; ir<k; ++ir) { set_val(trainX,ir,ictrain,get_val(inp,ir,ic)); } set_val(trainLabels,ictrain,0,get_val(inpl,ic,0)); ictrain++; } else { for(size_t ir=0; ir<k; ++ir) { float val = get_val(inp,ir,ic); set_val(testX,ir,ictest,val); } set_val(testLabels,ictest,0,get_val(inpl,ic,0)); ictest++; } } } void find_coeffs(Matrix& S, const Matrix& X, const Matrix& B, const Options& opts) { cerr << "Finding coefficients for given inputs\n"; int m = num_cols(X); int n = num_cols(B); int k = num_rows(X); float gamma = 2*opts.sigma*opts.sigma*opts.beta; cerr << "k: " << k << endl; cerr << "m: " << m << endl; cerr << "n: " << n << endl; cerr << "gamma: " << gamma << endl; init(S,n,m,false); std::string device_name = gpu::initialize_device(opts.device); float expt_time=0; { gpu::GpuEventTimer timer(expt_time); l1ls_coord_descent_cu(S, gamma, B, X); gpu::checkErrors(); } } void find_coeffs_for_letters(const Options& opts) { cerr << "Getting coeffs for letters\n"; Matrix inp, inpl; get_letters_as_input_matrix(inp,opts); get_letters_labels(inpl, opts); Matrix testX, trainX, testLabels, trainLabels; partition_into_train_test(trainX,trainLabels,testX,testLabels,inp,inpl,opts); Matrix B,S; read_basis(B,opts); find_coeffs(S,trainX,B,opts); write_test_train_data(testX,testLabels,trainX,trainLabels,opts); freeup(testX); freeup(trainX); freeup(inp); freeup(testLabels); freeup(trainLabels); freeup(inpl); freeup(B); freeup(S); } int main(int argc, const char** argv) { Options opts; read_options(argc,argv,opts); if(opts.runwhat==0) { cerr << "Run what?\n"; print_usage(argv); exit(1); } if(strcmp(opts.runwhat,"coeffs")==0) { cerr << "Just coeffs...\n"; main_coeffs(argc,argv); return 0; } else if(strcmp(opts.runwhat,"natural")==0) { cerr << "On natural images...\n"; if(opts.imagesdir==0) { print_usage(argv); std::cerr << "Specify input images directory\n"; return 0; } if(opts.basisdir==0) { print_usage(argv); std::cerr << "Specify output basis directory\n"; return 0; } cerr << "Writing to basis dir: " << opts.basisdir << endl; 
std::string device_name = gpu::initialize_device(opts.device); cerr << "Initialized device: " << device_name<<endl; run_expt_on_natural_images(opts); return 0; } else if(strcmp(opts.runwhat,"digits")==0) { cerr << "On digits...\n"; if(opts.filename==0) { print_usage(argv); std::cerr << "Cannot open file\n"; return 0; } if(opts.basisdir==0) { print_usage(argv); std::cerr << "Specify output basis directory\n"; return 0; } cerr << "Writing to basis dir: " << opts.basisdir << endl; run_expt_on_digits(opts); return 0; } else if(strcmp(opts.runwhat,"basis")==0) { cerr << "Just basis...\n"; main_basis(argc,argv); } else if(strcmp(opts.runwhat,"findcoeffs")==0) { cerr << "Finding coefficients\n"; find_coeffs_for_letters(opts); return 0; } return 0; }
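Both versions of this program alternate between solving for the coefficients S with the basis B fixed (l1ls_coord_descent_cu_basic) and updating B with S fixed (proj_grad_descent_cu_basic). For orientation only, here is a minimal CPU sketch of the soft-thresholding coordinate-descent update that an L1-regularized least-squares solver of this shape typically uses, assuming the objective ||x - B*s||^2 + gamma*||s||_1 suggested by gamma = 2*sigma*sigma*beta above; the function names are invented and this is not the GPU kernel's actual implementation:

#include <vector>

static float soft_threshold(float z, float t) {
    return (z > t) ? z - t : (z < -t) ? z + t : 0.0f;
}

// B: k x n, column-major (columns are basis vectors); x: length k; s: length n.
// Assumes the columns of B are non-zero (the driver above normalizes them to unit norm).
void l1ls_coord_descent_ref(const std::vector<float>& B, const std::vector<float>& x,
                            std::vector<float>& s, int k, int n, float gamma, int sweeps) {
    std::vector<float> r(x);                                   // residual r = x - B*s for the starting s
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < k; ++i) r[i] -= B[i + j * k] * s[j];
    for (int it = 0; it < sweeps; ++it) {
        for (int j = 0; j < n; ++j) {
            float bj_r = 0.0f, bj_bj = 0.0f;
            for (int i = 0; i < k; ++i) {
                bj_r  += B[i + j * k] * (r[i] + B[i + j * k] * s[j]);  // b_j . (residual without the s_j term)
                bj_bj += B[i + j * k] * B[i + j * k];
            }
            float s_new = soft_threshold(bj_r, 0.5f * gamma) / bj_bj;  // 1-D minimizer under the L1 penalty
            for (int i = 0; i < k; ++i) r[i] -= B[i + j * k] * (s_new - s[j]);
            s[j] = s_new;
        }
    }
}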
7db1607602cda727fe34f0a6e72e147516ea5ce7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void MaxPoolForward(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = bottom_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } static const int BACKWARD_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 8) #endif __global__ void MaxPoolBackward(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index/width; int w = index - h * width; //get some templating performance benefits without actually templating int phstart, phend, pwstart, pwend; if (stride_h == 1) { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) + 1; phend = min((h + pad_h) + 1, pooled_height); } else if (stride_h == 2) { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / 2 + 1; phend = min((h + pad_h) / 2 + 1, pooled_height); } else { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 
0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / stride_h + 1; phend = min((h + pad_h) / stride_h + 1, pooled_height); } if (stride_w == 1) { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) + 1; pwend = min((w + pad_w) + 1, pooled_width); } else if (stride_w == 2) { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / 2 + 1; pwend = min((w + pad_w) / 2 + 1, pooled_width); } else { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / stride_w + 1; pwend = min((w + pad_w) / stride_w + 1, pooled_width); } for (int n = blockIdx.y; n < num; n += gridDim.y) for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; //get some templating performance benefits without actually templating if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]); } } } } else { if (top_mask[phstart * pooled_width + pwstart] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]); } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? 
dilationH : safe_downcast<int, int64_t>(dilation[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BACKWARD_THREADS); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); hipLaunchKernelGGL(( MaxPoolForward<scalar_t, scalar_t>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "max_pool2d_with_indices_out_cuda_frame failed with error code ", hipGetLastError()); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const Tensor input = input_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); int64_t count = input.numel(); dim3 grid; int imgcount = inputWidth * inputHeight; const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS; grid.x = blocks; grid.y = nbatch; grid.z = nInputPlane; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridY < grid.y) grid.y = maxGridY; if (maxGridZ < grid.z) grid.z = maxGridZ; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); hipLaunchKernelGGL(( MaxPoolBackward<scalar_t, accscalar_t>) , dim3(grid), dim3(BACKWARD_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); } ); TORCH_CHECK(hipGetLastError() == hipSuccess, "fractional_max_pool2d_backward_out_cuda failed with error code ", hipGetLastError()); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, 
ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
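For readability, the flat-index decomposition that MaxPoolForward above applies to each output element of the NCHW tensor, restated as a small host-side helper (the struct and function names are illustrative only):

struct OutCoord { int n, c, ph, pw; };

// Mirrors the arithmetic at the top of MaxPoolForward: one flat index per output element.
OutCoord decode_output_index(int index, int channels, int pooled_height, int pooled_width) {
    OutCoord o;
    o.pw = index % pooled_width;
    o.ph = (index / pooled_width) % pooled_height;
    o.c  = (index / pooled_width / pooled_height) % channels;
    o.n  =  index / pooled_width / pooled_height / channels;
    return o;
}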
7db1607602cda727fe34f0a6e72e147516ea5ce7.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void MaxPoolForward(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; bottom_data += (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = bottom_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } static const int BACKWARD_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BACKWARD_THREADS, 8) #endif __global__ void MaxPoolBackward(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index/width; int w = index - h * width; //get some templating performance benefits without actually templating int phstart, phend, pwstart, pwend; if (stride_h == 1) { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) + 1; phend = min((h + pad_h) + 1, pooled_height); } else if (stride_h == 2) { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / 2 + 1; phend = min((h + pad_h) / 2 + 1, pooled_height); } else { phstart = (h + pad_h < ((kernel_h - 1) * dilation_h + 1)) ? 0 : (h + pad_h - ((kernel_h - 1) * dilation_h + 1)) / stride_h + 1; phend = min((h + pad_h) / stride_h + 1, pooled_height); } if (stride_w == 1) { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 
0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) + 1; pwend = min((w + pad_w) + 1, pooled_width); } else if (stride_w == 2) { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / 2 + 1; pwend = min((w + pad_w) / 2 + 1, pooled_width); } else { pwstart = (w + pad_w < ((kernel_w - 1) * dilation_w + 1)) ? 0 : (w + pad_w - ((kernel_w - 1) * dilation_w + 1)) / stride_w + 1; pwend = min((w + pad_w) / stride_w + 1, pooled_width); } for (int n = blockIdx.y; n < num; n += gridDim.y) for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; top_diff += offset; top_mask += offset; //get some templating performance benefits without actually templating if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw]); } } } } else { if (top_mask[phstart * pooled_width + pwstart] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[phstart * pooled_width + pwstart]); } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int count = safe_downcast<int, int64_t>(output.numel()); const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BACKWARD_THREADS); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); MaxPoolForward<scalar_t, scalar_t> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "max_pool2d_with_indices_out_cuda_frame failed with error code ", cudaGetLastError()); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? 
padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); const Tensor input = input_.contiguous(); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(); gradInput.resize_as_(input); int64_t count = input.numel(); dim3 grid; int imgcount = inputWidth * inputHeight; const int blocks = (imgcount + BACKWARD_THREADS - 1) / BACKWARD_THREADS; grid.x = blocks; grid.y = nbatch; grid.z = nInputPlane; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridY < grid.y) grid.y = maxGridY; if (maxGridZ < grid.z) grid.z = maxGridZ; AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); MaxPoolBackward<scalar_t, accscalar_t> <<<grid, BACKWARD_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); } ); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "fractional_max_pool2d_backward_out_cuda failed with error code ", cudaGetLastError()); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { 
max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
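The output extents above come from pooling_output_shape<int64_t>. A standalone sketch of the standard PyTorch pooling-size formula it is expected to follow, given here as an assumption for reference (the helper name pooled_size is invented):

#include <cstdint>

int64_t pooled_size(int64_t in, int64_t kernel, int64_t pad, int64_t stride,
                    int64_t dilation, bool ceil_mode) {
    int64_t span = in + 2 * pad - dilation * (kernel - 1) - 1;
    int64_t out = (ceil_mode ? (span + stride - 1) / stride : span / stride) + 1;
    if (ceil_mode && (out - 1) * stride >= in + pad) --out;  // last window must start inside the input
    return out;
}
// e.g. a 7-wide input with kernel 3, stride 2, no padding/dilation gives pooled_size(...) == 3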
73dd786d6bd16cb6ddbb0bc8ffc0e7adadb9b027.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mex.h" #include "stdio.h" #include <string.h> #include "rocblas.h" #pragma comment(lib,"cublas.lib") #define blocksize 32 #define THREAD_NUM 512 #define BLOCK_NUM 2048 __global__ void Im2col_SAME(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int in_channel,\ int output_channel,int pad_needed_height,int pad_needed_width,int new_height,int new_width,int padheight,int padwidth) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i,j,ii,jj,pp,qq,index,t,flag; int height=padheight-pad_needed_height,width=padwidth-pad_needed_width; for(int u=tid+bid*THREAD_NUM;u<c*d*in_channel*new_height*new_width*batchsize;u+= BLOCK_NUM*THREAD_NUM) { i=u/(c*d*in_channel); j=u%(c*d*in_channel); ii=j/(c*d);//in_channel jj=i/(new_height*new_width);//batchsize pp=j%(c*d); qq=i%(new_height*new_width); index=pp/c*padheight+pp%c+qq/new_height*padheight*a+qq%new_height*b; if((index%padheight-pad_needed_height/2)<0||(index%padheight-pad_needed_height/2)>=height||(index/padheight-pad_needed_width/2)<0||(index/padheight-pad_needed_width/2)>=width) Res_In[u]=0; else{ flag=(index/padheight-pad_needed_width/2)*height+index%padheight-pad_needed_height/2; t=ii*height*width*batchsize+jj*height*width+flag; Res_In[u]=In[t];} } } __global__ void Im2col_VALID(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int in_channel,\ int output_channel,int no_needed_height,int no_needed_width,int new_height,int new_width,int padheight,int padwidth) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i,j,ii,jj,pp,qq,index,t; int height=no_needed_height+padheight,width=no_needed_width+padwidth; for(int u=tid+bid*THREAD_NUM;u<c*d*in_channel*new_height*new_width*batchsize;u+= BLOCK_NUM*THREAD_NUM) { i=u/(c*d*in_channel); j=u%(c*d*in_channel); ii=j/(c*d);//in_channel jj=i/(new_height*new_width);//batchsize pp=j%(c*d); qq=i%(new_height*new_width); index=pp/c*height+pp%c+qq/new_height*height*a+qq%new_height*b; t=ii*height*width*batchsize+jj*height*width+index; Res_In[u]=In[t]; } } __global__ void AddBias(float *dev,float *bias,int new_height,int new_width,int batchsize,int output_channel) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i; __shared__ float shared[THREAD_NUM]; shared[tid]=0; for(int u=tid+bid*(new_height*new_width*batchsize);u<(new_height*new_width*batchsize)*(bid+1);u+= THREAD_NUM) { shared[tid]+=dev[u]; } __syncthreads(); if(tid==0) { for(i=1;i<THREAD_NUM;i++) { shared[0]+=shared[i]; } bias[bid]=shared[0]; } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* [dw,db]=dilconv2d(input,dev,strides,padding,wsize,bsize) input=[height ,width ,batchsize ,in_channels] (inputvalidsamepadding) dev=[new_height ,new_width ,batchsize ,output_channels] output=[filter_height , filter_width ,in_channels, output_channels] a=strides(1);b=strides(2);c=size(w,1);d=size(w,2); */ const size_t *dim_array = mxGetDimensions(prhs[0]); int height=*dim_array,width=*(dim_array+1),batchsize=1,in_channel=1; int number_of_dims = mxGetNumberOfDimensions(prhs[0]); if(number_of_dims==3) batchsize=*(dim_array+2); if(number_of_dims==4) {batchsize=*(dim_array+2); in_channel=*(dim_array+3);} const size_t *dim_array1 = mxGetDimensions(prhs[1]); int new_height=*dim_array1,new_width=*(dim_array1+1),output_channel=1; int number_of_dims1 = mxGetNumberOfDimensions(prhs[1]); if(number_of_dims1==4) output_channel=*(dim_array1+3); double *s; s=mxGetPr(prhs[2]); int a=int(*s),b=int(*(s+1)); char 
*padding=mxArrayToString(prhs[3]); double *wsize,*bsize; wsize=mxGetPr(prhs[4]); bsize=mxGetPr(prhs[5]); int c=int(*wsize),d=int(*(wsize+1)); float *A=(float*)mxGetPr(prhs[0]);//input float *B=(float*)mxGetPr(prhs[1]);// float *dev,*output,*In,*Res_In,*bias; int padheight=c+new_height+(new_height-1)*(a-1)-1,padwidth=d+new_width+(new_width-1)*(b-1)-1,\ pad_needed_height,pad_needed_width,no_needed_height,no_needed_width; size_t size_1,size_2,size_3,size_4; size_1=c*d*in_channel*new_height*new_width*batchsize*sizeof(float); size_2=height*width*batchsize*in_channel*sizeof(float); size_3=new_height*new_width*batchsize*output_channel*sizeof(float); size_4=c*d*in_channel*output_channel*sizeof(float); hipMalloc((void**)&In,size_2); hipMalloc((void**)&Res_In,size_1); hipMemcpy(In,A , size_2, hipMemcpyHostToDevice); if(strcmp(padding,"SAME")==0) { pad_needed_height=padheight-height; pad_needed_width=padwidth-width; Im2col_SAME<< <BLOCK_NUM,THREAD_NUM>> >(In,Res_In,a,b,c,d,batchsize,in_channel,output_channel,pad_needed_height,pad_needed_width,new_height,new_width,padheight,padwidth); hipDeviceSynchronize(); } if(strcmp(padding,"VALID")==0) { no_needed_height=height-padheight; no_needed_width=width-padwidth; Im2col_VALID<< <BLOCK_NUM,THREAD_NUM>> >(In,Res_In,a,b,c,d,batchsize,in_channel,output_channel,no_needed_height,no_needed_width,new_height,new_width,padheight,padwidth); hipDeviceSynchronize(); } hipFree(In); // hipMalloc((void**)&dev,size_3); hipMemcpy(dev,B , size_3, hipMemcpyHostToDevice); hipMalloc((void**)&output,size_4); int L_rows=c*d*in_channel,L_cols=new_height*new_width*batchsize,R_cols=output_channel; /* dim3 dimBlock(blocksize, blocksize); OutputMatrix<< <BLOCK_NUM,dimBlock>> >(Res_In,dev,output,L_rows,L_cols,R_cols); */ float alpha=1,beta=0; hipblasHandle_t handle; hipblasCreate(&handle); hipblasSgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N,L_rows,R_cols,L_cols,&alpha,Res_In, L_rows,dev,L_cols,&beta,output,L_rows); hipblasDestroy(handle); //hipDeviceSynchronize(); hipFree(Res_In); // hipMalloc((void**)&bias,output_channel*sizeof(float)); AddBias<< <output_channel,THREAD_NUM>> >(dev,bias,new_height,new_width,batchsize,output_channel); hipFree(dev); // const size_t dim[]={c,d,in_channel, output_channel}; plhs[0] = mxCreateNumericArray(4,dim ,mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(plhs[0]), output, size_4, hipMemcpyDeviceToHost); hipFree(output); const size_t dim1[]={1,output_channel}; plhs[1] = mxCreateNumericArray(2,dim1 ,mxSINGLE_CLASS, mxREAL); hipMemcpy((float*)mxGetPr(plhs[1]), bias, output_channel*sizeof(float), hipMemcpyDeviceToHost); hipFree(bias); }
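A simplified CPU reference (illustrative only, not code from this file) of the im2col layout the Im2col_VALID kernel above builds before the GEMM: single input channel, batch of one, column-major storage as in MATLAB; each output position becomes one column holding its c-by-d patch. The function and parameter names are invented:

#include <vector>

void im2col_valid_ref(const std::vector<float>& img, int height, int width,
                      int c, int d, int stride_h, int stride_w,
                      std::vector<float>& cols, int& new_h, int& new_w) {
    new_h = (height - c) / stride_h + 1;          // number of valid window positions vertically
    new_w = (width  - d) / stride_w + 1;          // and horizontally
    cols.assign((size_t)c * d * new_h * new_w, 0.0f);
    for (int q = 0; q < new_h * new_w; ++q) {     // q indexes the output position (one column of cols)
        int oh = q % new_h, ow = q / new_h;       // column-major spatial order
        for (int p = 0; p < c * d; ++p) {         // p indexes the element inside the patch (one row of cols)
            int kh = p % c, kw = p / c;
            cols[(size_t)q * c * d + p] =
                img[(size_t)(ow * stride_w + kw) * height + (oh * stride_h + kh)];
        }
    }
}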
73dd786d6bd16cb6ddbb0bc8ffc0e7adadb9b027.cu
#include "mex.h" #include "stdio.h" #include <string.h> #include "cublas_v2.h" #pragma comment(lib,"cublas.lib") #define blocksize 32 #define THREAD_NUM 512 #define BLOCK_NUM 2048 __global__ void Im2col_SAME(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int in_channel,\ int output_channel,int pad_needed_height,int pad_needed_width,int new_height,int new_width,int padheight,int padwidth) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i,j,ii,jj,pp,qq,index,t,flag; int height=padheight-pad_needed_height,width=padwidth-pad_needed_width; for(int u=tid+bid*THREAD_NUM;u<c*d*in_channel*new_height*new_width*batchsize;u+= BLOCK_NUM*THREAD_NUM) { i=u/(c*d*in_channel); j=u%(c*d*in_channel); ii=j/(c*d);//which in_channel this element belongs to jj=i/(new_height*new_width);//which batch element this belongs to pp=j%(c*d); qq=i%(new_height*new_width); index=pp/c*padheight+pp%c+qq/new_height*padheight*a+qq%new_height*b; if((index%padheight-pad_needed_height/2)<0||(index%padheight-pad_needed_height/2)>=height||(index/padheight-pad_needed_width/2)<0||(index/padheight-pad_needed_width/2)>=width) Res_In[u]=0; else{ flag=(index/padheight-pad_needed_width/2)*height+index%padheight-pad_needed_height/2; t=ii*height*width*batchsize+jj*height*width+flag; Res_In[u]=In[t];} } } __global__ void Im2col_VALID(float *In,float *Res_In,int a,int b,int c,int d,int batchsize,int in_channel,\ int output_channel,int no_needed_height,int no_needed_width,int new_height,int new_width,int padheight,int padwidth) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i,j,ii,jj,pp,qq,index,t; int height=no_needed_height+padheight,width=no_needed_width+padwidth; for(int u=tid+bid*THREAD_NUM;u<c*d*in_channel*new_height*new_width*batchsize;u+= BLOCK_NUM*THREAD_NUM) { i=u/(c*d*in_channel); j=u%(c*d*in_channel); ii=j/(c*d);//which in_channel this element belongs to jj=i/(new_height*new_width);//which batch element this belongs to pp=j%(c*d); qq=i%(new_height*new_width); index=pp/c*height+pp%c+qq/new_height*height*a+qq%new_height*b; t=ii*height*width*batchsize+jj*height*width+index; Res_In[u]=In[t]; } } __global__ void AddBias(float *dev,float *bias,int new_height,int new_width,int batchsize,int output_channel) { const int tid=threadIdx.x; const int bid=blockIdx.x; int i; __shared__ float shared[THREAD_NUM]; shared[tid]=0; for(int u=tid+bid*(new_height*new_width*batchsize);u<(new_height*new_width*batchsize)*(bid+1);u+= THREAD_NUM) { shared[tid]+=dev[u]; } __syncthreads(); if(tid==0) { for(i=1;i<THREAD_NUM;i++) { shared[0]+=shared[i]; } bias[bid]=shared[0]; } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { /* [dw,db]=dilconv2d(input,dev,strides,padding,wsize,bsize) input=[height ,width ,batchsize ,in_channels] (input becomes the part used in the forward computation: what forward VALID did not use is discarded, and the padding added by forward SAME is filled back in) dev=[new_height ,new_width ,batchsize ,output_channels] output=[filter_height , filter_width ,in_channels, output_channels] a=strides(1);b=strides(2);c=size(w,1);d=size(w,2); */ const size_t *dim_array = mxGetDimensions(prhs[0]); int height=*dim_array,width=*(dim_array+1),batchsize=1,in_channel=1; int number_of_dims = mxGetNumberOfDimensions(prhs[0]); if(number_of_dims==3) batchsize=*(dim_array+2); if(number_of_dims==4) {batchsize=*(dim_array+2); in_channel=*(dim_array+3);} const size_t *dim_array1 = mxGetDimensions(prhs[1]); int new_height=*dim_array1,new_width=*(dim_array1+1),output_channel=1; int number_of_dims1 = mxGetNumberOfDimensions(prhs[1]); if(number_of_dims1==4) output_channel=*(dim_array1+3); double *s; s=mxGetPr(prhs[2]); int a=int(*s),b=int(*(s+1)); char *padding=mxArrayToString(prhs[3]); double 
*wsize,*bsize; wsize=mxGetPr(prhs[4]); bsize=mxGetPr(prhs[5]); int c=int(*wsize),d=int(*(wsize+1)); float *A=(float*)mxGetPr(prhs[0]);//传入input float *B=(float*)mxGetPr(prhs[1]);//传入误差矩阵 float *dev,*output,*In,*Res_In,*bias; int padheight=c+new_height+(new_height-1)*(a-1)-1,padwidth=d+new_width+(new_width-1)*(b-1)-1,\ pad_needed_height,pad_needed_width,no_needed_height,no_needed_width; size_t size_1,size_2,size_3,size_4; size_1=c*d*in_channel*new_height*new_width*batchsize*sizeof(float); size_2=height*width*batchsize*in_channel*sizeof(float); size_3=new_height*new_width*batchsize*output_channel*sizeof(float); size_4=c*d*in_channel*output_channel*sizeof(float); cudaMalloc((void**)&In,size_2); cudaMalloc((void**)&Res_In,size_1); cudaMemcpy(In,A , size_2, cudaMemcpyHostToDevice); if(strcmp(padding,"SAME")==0) { pad_needed_height=padheight-height; pad_needed_width=padwidth-width; Im2col_SAME<< <BLOCK_NUM,THREAD_NUM>> >(In,Res_In,a,b,c,d,batchsize,in_channel,output_channel,pad_needed_height,pad_needed_width,new_height,new_width,padheight,padwidth); cudaThreadSynchronize(); } if(strcmp(padding,"VALID")==0) { no_needed_height=height-padheight; no_needed_width=width-padwidth; Im2col_VALID<< <BLOCK_NUM,THREAD_NUM>> >(In,Res_In,a,b,c,d,batchsize,in_channel,output_channel,no_needed_height,no_needed_width,new_height,new_width,padheight,padwidth); cudaThreadSynchronize(); } cudaFree(In); //矩阵相乘计算权值 cudaMalloc((void**)&dev,size_3); cudaMemcpy(dev,B , size_3, cudaMemcpyHostToDevice); cudaMalloc((void**)&output,size_4); int L_rows=c*d*in_channel,L_cols=new_height*new_width*batchsize,R_cols=output_channel; /* dim3 dimBlock(blocksize, blocksize); OutputMatrix<< <BLOCK_NUM,dimBlock>> >(Res_In,dev,output,L_rows,L_cols,R_cols); */ float alpha=1,beta=0; cublasHandle_t handle; cublasCreate(&handle); cublasSgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N,L_rows,R_cols,L_cols,&alpha,Res_In, L_rows,dev,L_cols,&beta,output,L_rows); cublasDestroy(handle); //cudaThreadSynchronize(); cudaFree(Res_In); //相加计算偏置 cudaMalloc((void**)&bias,output_channel*sizeof(float)); AddBias<< <output_channel,THREAD_NUM>> >(dev,bias,new_height,new_width,batchsize,output_channel); cudaFree(dev); //输出 const size_t dim[]={c,d,in_channel, output_channel}; plhs[0] = mxCreateNumericArray(4,dim ,mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(plhs[0]), output, size_4, cudaMemcpyDeviceToHost); cudaFree(output); const size_t dim1[]={1,output_channel}; plhs[1] = mxCreateNumericArray(2,dim1 ,mxSINGLE_CLASS, mxREAL); cudaMemcpy((float*)mxGetPr(plhs[1]), bias, output_channel*sizeof(float), cudaMemcpyDeviceToHost); cudaFree(bias); }
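The cublasSgemm call in the MEX file above computes the weight gradient as one matrix product, dW = Im2col(input) · dE, with m = c*d*in_channel, n = output_channel and k = new_height*new_width*batchsize. The standalone sketch below (illustrative sizes only, not part of the MEX file; link with -lcublas) shows the same column-major SGEMM call pattern on small host-filled matrices.

// Minimal sketch of the column-major C = A * B cuBLAS call used above.
// M, K, N are made-up illustration values, not taken from the MEX file.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#include "cublas_v2.h"

int main() {
    const int M = 4, K = 3, N = 2;            // A is MxK, B is KxN, C is MxN (column-major)
    std::vector<float> A(M * K, 1.0f), B(K * N, 2.0f), C(M * N, 0.0f);

    float *dA, *dB, *dC;
    cudaMalloc(&dA, M * K * sizeof(float));
    cudaMalloc(&dB, K * N * sizeof(float));
    cudaMalloc(&dC, M * N * sizeof(float));
    cudaMemcpy(dA, A.data(), M * K * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B.data(), K * N * sizeof(float), cudaMemcpyHostToDevice);

    const float alpha = 1.0f, beta = 0.0f;
    cublasHandle_t handle;
    cublasCreate(&handle);
    // Same argument pattern as the MEX file: no transposition, leading
    // dimensions equal to the row counts of the column-major operands.
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M, N, K,
                &alpha, dA, M, dB, K, &beta, dC, M);
    cublasDestroy(handle);

    cudaMemcpy(C.data(), dC, M * N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("C[0] = %f (expected 1*2*K = %d)\n", C[0], 2 * K);

    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}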
8749f669dee85ed8ac23613b1e2746e2f9cfc9e6.hip
// !!! This is a file automatically generated by hipify!!! // // ht_helix.cpp // // // Created by Lorenzo Rinaldi on 29/04/14. // // // compile: // nvcc -I/usr/local/cuda-5.5/samples/common/inc -I/usr/local/cuda-5.5/targets/x86_64-linux/include -gencode arch=compute_20,code=sm_21 -o ht_rhophi ht_rhophi.cu //NOTE: INVERTITE DIMENSIONI NRHO-NPHI PER ACCESSO MATRICE #include <hip/hip_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper utility functions #include "simpleIndexing.cu" #include <string.h> #include <cmath> #include <algorithm> #include <vector> #include <iostream> #include <fstream> #include <sstream> #include <unistd.h> using namespace std; #define NHMAX 300 #define Nsec 4 // Numero settori in piano trasverso #define Ntheta 16 // Numero settori in piano longitudinale #define Nphi 1024 // Numero bin angolo polare #define Nrho 1024 // Numero bin distanza radiale #define rhomin 500.f // mm #define rhomax 100000.f // mm #define phimin 0.f // rad #define phimax 2*M_PI // rad #define thetamin 0.f // rad #define thetamax M_PI // rad #define ac_soglia 4 // soglia nella matrice di accumulazione /* --- DEFINE TO ALTER EXECUTION --- */ //#define PARALLEL_REDUX_MAX //NOTE: still wrong!! do not use it //#define VERBOSE_DUMP #define CUDA_MALLOCHOST_OUTPUT //#define CUDA_MANAGED_TRANSFER #define max_tracks_out 100 int acc_Mat [ Nsec ][ Ntheta ][ Nrho ] [Nphi ]; //int Max_rel [ Nsec ][ Ntheta ][Nphi ] [Nrho ]; int debug_accMat[ Nsec ][ Ntheta ][ Nrho ] [ Nphi ]; float dtheta= M_PI/Ntheta; float drho= (rhomax-rhomin)/Nrho; float dphi= (phimax-phimin)/Nphi; vector<float> x_values; vector<float> y_values; vector<float> z_values; #define OUT_VIEW_FRAME 3; #ifndef PARALLEL_REDUX_MAX struct track_param{ int acc; /*unsigned int isec; unsigned int ith; unsigned int iphi; unsigned int irho;*/ }; #ifndef CUDA_MALLOCHOST_OUTPUT struct track_param host_out_tracks[ Nsec * Ntheta * Nrho * Nphi ]; #endif #endif //lock definition #ifndef __LOCK_H__ #define __LOCK_H__ struct Lock { int *mutex; Lock( void ) { hipMalloc( (void**)&mutex, sizeof(int) ) ; hipMemset( mutex, 0, sizeof(int) ); } ~Lock( void ) { hipFree( mutex ); } __device__ void lock( void ) { while( atomicCAS( mutex, 0, 1 ) != 0 ); } __device__ void unlock( void ) { atomicExch( mutex, 0 ); } }; #endif //end lock void read_inputFile(string file_path, unsigned int num_hits); // CUDA timer macros hipEvent_t c_start, c_stop; inline void start_time() { hipEventCreate(&c_start); hipEventCreate(&c_stop); hipEventRecord(c_start, 0); } inline float stop_time(const char *msg) { float elapsedTime = 0; hipEventRecord(c_stop, 0); hipEventSynchronize(c_stop); hipEventElapsedTime(&elapsedTime, c_start, c_stop); //printf("Time to %s: %.3f ms\n", msg, elapsedTime); hipEventDestroy(c_start); hipEventDestroy(c_stop); return elapsedTime; } //#define floatToInt(x) (((x) >= 0) ? 
(int)((x) + 0.5) : (int)((x) - 0.5)) #define get4DIndex(s,t,r,p) ((s)*(Ntheta*Nrho*Nphi))+(((t)*Nrho*Nphi) +(((r)*Nphi)+(p))) #define get2DIndex(r,p) (((r)*Nphi)+(p)) __global__ void voteHoughSpace(float *dev_x_values, float *dev_y_values, float *dev_z_values, int *dev_accMat, float dtheta, float drho, float dphi){ __shared__ float x_val; __shared__ float y_val; __shared__ float z_val; if(threadIdx.x == 0){ x_val = dev_x_values[blockIdx.x]; y_val = dev_y_values[blockIdx.x]; z_val = dev_z_values[blockIdx.x]; } __syncthreads(); float R2 = x_val*x_val + y_val*y_val; float theta=acos(z_val/sqrt(R2+z_val*z_val)); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_val,x_val); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor((sec/2/M_PI*Nsec)); int iphi = threadIdx.x; float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_val*cos(phi)+y_val*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); int accu_index = get4DIndex(isec, ith, irho, iphi);//(isec*(Ntheta*Nphi*Nrho))+((ith*Nphi*Nrho) +((iphi*Nrho)+irho)); if (rho<=rhomax && rho>rhomin) { atomicAdd(&(dev_accMat[accu_index]),1); } } #ifndef PARALLEL_REDUX_MAX __global__ void findRelativeMax_withShared(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ extern __shared__ int SH_local_accMat[]; //check if it is a local maxima by verifying that it is greater then (>=) its neighboors /*unsigned int index_Y0 = get2DIndex(0,iphi); unsigned int index_Y1 = get2DIndex(1,iphi); unsigned int index_Y2 = get2DIndex(2,iphi);*/ unsigned int index_Y1 = iphi; SH_local_accMat[index_Y1] = dev_accMat[get4DIndex(isec, ith, irho, iphi)]; //save into shared memory this thread accumulator //In order to avoid oppressing global memory access, we delegate upper and lower rows, irho+1 and irho-1, loading into shared memory //only to those threads which passes the first "cut" on threshold //__syncthreads(); //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ if (SH_local_accMat[index_Y1] >= ac_soglia){ //we're sure that each thread has its own acc saved in shared memory /*SH_local_accMat[index_Y0] = dev_accMat[get4DIndex(isec, ith, irho-1, iphi)]; SH_local_accMat[index_Y2] = dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]; __syncthreads();*/ //NOTE: since we only access once (irho-1,iphi) and (irho+1,iphi) for this computation, and there isn't any reuse for other //threads of these informations, we don't need to put the other two rows in shared memory //(x,y) > (x,y-1) && (x,y) >= (x,y+1) /*if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y0] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y2]){*/ if(SH_local_accMat[index_Y1] > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && SH_local_accMat[index_Y1] >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ //__syncthreads(); //this is just to make sure that all threads had written in the shared memory, before reading each other values //(x,y) > (x-1, y) && (x,y) >= (x+1, y) if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y1-1] && SH_local_accMat[index_Y1] >= 
SH_local_accMat[index_Y1+1]){ /*atomicAdd(&local_NMrel, 1);*/ //NOTE atomic op on shared memory are SLOWER than global memory, because they're implemented in software atomicAdd(NMrel, 1); dev_output[globalIndex].acc = SH_local_accMat[index_Y1]; //dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ } } } } } __global__ void findRelativeMax(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ //check if it is a local maxima by verifying that it is greater then (>=) its neighboors //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ //each thread is assigned to one point of the accum. matrix: int acc= dev_accMat[get4DIndex(isec, ith, irho, iphi)]; if (acc >= ac_soglia){ if(acc > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && acc >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ if(acc > dev_accMat[get4DIndex(isec, ith, irho, iphi-1)] && acc >= dev_accMat[get4DIndex(isec, ith, irho, iphi+1)]){ /*atomicAdd(&local_NMrel, 1); if(threadIdx.x == 0){ mutex.lock(); *NMrel += local_NMrel; mutex.unlock(); }*/ atomicAdd(NMrel, 1); //mutex.lock(); dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ //mutex.unlock(); } } } } } #else //NOTE: wrong approach to solve this problem //TODO: improve as on slides __global__ void reduceParallelMax(int *dev_accMat, int *dev_output, int *dev_maxRelOutput, unsigned int N){ extern __shared__ int sdata[]; int* max_sdata = (int *) sdata; int* relMax_sdata = (int *) &sdata[blockDim.x]; //each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; //local index //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; //global index (1D grid - 1D block) unsigned int i = getGlobalIdx_2D_1D(); if(i < N){ //check if thread is in data bounds max_sdata[tid] = dev_accMat[i]; relMax_sdata[tid] = dev_accMat[i]; __syncthreads(); //do reduction in shared memory for(unsigned int s=1; s < blockDim.x; s*=2){ if(tid % (2*s) == 0){ //it is for a different stride //atomicMax(&(max_sdata[tid]),max_sdata[tid+s]); //TODO: change without atomic max_sdata[tid] = (max_sdata[tid] > max_sdata[tid+s]) ? 
max_sdata[tid] : max_sdata[tid+s]; __syncthreads(); } __syncthreads(); } //write results (now found in the first element of the array) for this block to global memory //if(tid == 0) dev_output[blockIdx.x] = sdata[0]; if(tid == 0) dev_output[blockIdx.x] = max_sdata[0]; //at sdata[0], we found the maximum if(relMax_sdata[tid] >= ac_soglia){ dev_maxRelOutput[i] = relMax_sdata[tid]; }else{ dev_maxRelOutput[i] = 0; } } } #endif void help(char* prog) { printf("Use %s [-l #loops] [-n #hitsToRead] [-h] \n\n", prog); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -n hits Number of hits to read from input file (Default: 236).\n"); printf(" -h This help.\n"); } int main(int argc, char* argv[]){ unsigned int N_LOOPS = 1; unsigned int N_HITS = 236; int c; //getting command line options while ( (c = getopt(argc, argv, "l:n:h")) != -1 ) { switch(c) { case 'n': N_HITS = atoi(optarg); break; case 'l': N_LOOPS = atoi(optarg); break; case 'h': help(argv[0]); return 0; break; default: printf("Unkown option!\n"); help(argv[0]); return 0; } } int GPU_N; checkCudaErrors(hipGetDeviceCount(&GPU_N)); hipDeviceProp_t *deviceProp; deviceProp = (hipDeviceProp_t *) malloc(sizeof(hipDeviceProp_t)*GPU_N); for(unsigned int i = 0; i < GPU_N; i++){ checkCudaErrors(hipGetDeviceProperties(&deviceProp[i], i)); cout << deviceProp[i].name << endl; } #ifndef CUDA_MANAGED_TRANSFER struct track_param *host_out_tracks; start_time(); #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipHostMalloc((void **) &host_out_tracks, (sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)))); #else host_out_tracks = malloc(sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)); #endif float init_outputMatrix = stop_time("init output matrix with hipHostMalloc"); cout << "time to init output matrix (once): " << init_outputMatrix << endl; #endif int *dev_accMat; float *dev_x_values; float *dev_y_values; float *dev_z_values; float *x_values_temp; float *y_values_temp; float *z_values_temp; //executions loop for(unsigned int loop = 0; loop < N_LOOPS; loop++){ float timing[5]; //float R = 0.f; // Inizializzo a zero le matrici memset(&acc_Mat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); memset(&debug_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); //memset(&Max_rel, 0, (sizeof(int)*(Nsec*Ntheta*Nphi*Nrho)) ); //alloc accumulator matrix on GPU start_time(); checkCudaErrors(hipMalloc((void **) &dev_accMat, (sizeof(int)* (Nsec * Ntheta * Nrho*Nphi)) )); checkCudaErrors(hipMemset(dev_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)))); timing[1] = stop_time("malloc dev_accMat and memset(0)"); //riempi i valori dentro x_values , y_values , z_values read_inputFile("hits-5000.txt", N_HITS); // read_inputFile("../datafiles/hits-1.txt"); #ifdef CUDA_MANAGED_TRANSFER int cudaVer = 0; hipRuntimeGetVersion(&cudaVer); if(cudaVer >= 6000){ start_time(); checkCudaErrors(hipMallocManaged(&dev_x_values,sizeof(float)*x_values.size())); checkCudaErrors(hipMallocManaged(&dev_y_values,sizeof(float)*y_values.size())); checkCudaErrors(hipMallocManaged(&dev_z_values,sizeof(float)*z_values.size())); for(unsigned int i = 0; i < x_values.size(); i++){ dev_x_values[i] = x_values.at(i); dev_y_values[i] = y_values.at(i); dev_z_values[i] = z_values.at(i); } timing[0] = stop_time("Input malloc and copy HtoD"); }else{ #endif x_values_temp = (float*) malloc(sizeof(float)*x_values.size()); y_values_temp = (float*) malloc(sizeof(float)*y_values.size()); z_values_temp = (float*) malloc( sizeof(float)*z_values.size()); for(unsigned int i = 0; i < x_values.size(); 
i++){ x_values_temp[i] = x_values.at(i); y_values_temp[i] = y_values.at(i); z_values_temp[i] = z_values.at(i); } start_time(); checkCudaErrors(hipMalloc((void **) &dev_x_values, sizeof(float)*x_values.size())); checkCudaErrors(hipMalloc((void **) &dev_y_values, sizeof(float)*y_values.size())); checkCudaErrors(hipMalloc((void **) &dev_z_values, sizeof(float)*z_values.size())); checkCudaErrors(hipMemcpy(dev_x_values, x_values_temp, sizeof(float)*x_values.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_y_values, y_values_temp, sizeof(float)*y_values.size(), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_z_values, z_values_temp, sizeof(float)*z_values.size(), hipMemcpyHostToDevice)); timing[0] = stop_time("Input malloc and copy HtoD"); #ifdef CUDA_MANAGED_TRANSFER } #endif start_time(); hipLaunchKernelGGL(( voteHoughSpace) , dim3(x_values.size()), dim3(Nphi), 0, 0, dev_x_values, dev_y_values, dev_z_values, dev_accMat, dtheta, drho, dphi); //assumes that Nphi == Nrho timing[2] = stop_time("Vote"); //#ifdef VERBOSE_DUMP checkCudaErrors(hipMemcpy((void *) &debug_accMat, dev_accMat, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)), hipMemcpyDeviceToHost)); //#endif // conteggia gli elementi della matrice int total=0; for (int r=0; r<Nsec*Ntheta*Nphi*Nrho; r++){ cout << debug_accMat[r] << endl; } //CPU execution for(unsigned int i = 0; i < x_values.size(); i++){ //cout << x_values.at(i) << " - "; //cout << y_values.at(i) << endl; float R2=x_values.at(i)*x_values.at(i)+y_values.at(i)*y_values.at(i); float theta=acos(z_values.at(i)/sqrt(R2+z_values.at(i)*z_values.at(i))); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_values.at(i),x_values.at(i)); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor(sec/2/M_PI*Nsec); for(int iphi = 0; iphi < Nphi; iphi++){ float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_values.at(i)*cos(phi)+y_values.at(i)*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); if (rho<=rhomax && rho>rhomin) { acc_Mat[isec][ith][irho][iphi]++; } } } #ifdef VERBOSE_DUMP //check unsigned int corretto = 0; unsigned int errore = 0; unsigned int letto = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[isec][ith][irho][iphi] != debug_accMat[isec][ith][irho][iphi]){ printf("diverso acc_Mat[%d][%d][%d][%d] %d - debug_accMat[%d][%d][%d][%d] %d \n", isec, ith, irho, iphi, acc_Mat[isec][ith][irho][iphi], isec, ith, irho, iphi, debug_accMat[isec][ith][irho][iphi]); errore++; }else corretto++; letto++; } } } } printf("corretti %d sbaglati %d; letti %d\n", corretto, errore, letto); /*for(unsigned int i = 0; i < Nsec; i++){ cout << "sec " << i << ":" << endl; for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[i][ith][iphi][irho] != 0) cout << "accMat[get3DIndex(" << ith << ", " << iphi << ", " << irho << ") = " << acc_Mat[i][ith][iphi][irho] << endl; } } } }*/ #endif checkCudaErrors(hipFree(dev_x_values)); checkCudaErrors(hipFree(dev_y_values)); checkCudaErrors(hipFree(dev_z_values)); #ifndef CUDA_MANAGED_TRANSFER free(x_values_temp); free(y_values_temp); free(z_values_temp); #endif x_values.clear(); y_values.clear(); z_values.clear(); //trova il massimo relativo unsigned int host_NMrel = 0; // --- Prendiamo le 
informazioni specifiche della GPU per la divisione del lavoro appropriata unsigned int maxThreadsPerBlock = deviceProp[0].maxThreadsPerBlock; #ifndef PARALLEL_REDUX_MAX struct track_param *dev_indexOutput; Lock my_lock; unsigned int *NMrel; start_time(); #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ checkCudaErrors(hipMallocManaged(&dev_indexOutput,(sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi)) )); checkCudaErrors(hipMallocManaged(&NMrel,sizeof(unsigned int) )); *NMrel = 0; }else{ #endif checkCudaErrors(hipMalloc((void **) &NMrel, (sizeof(unsigned int)))); checkCudaErrors(hipMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(hipMalloc((void **) &dev_indexOutput, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )) )); #ifdef CUDA_MANAGED_TRANSFER } #endif checkCudaErrors(hipMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); timing[1] += stop_time("malloc dev_indexOutput+NMrel and memset"); // dividiamo adeguatamente il lavoro // in base al numero massimo di thread disponibili in un singolo thread-block unsigned int dim_x_block = Nphi; unsigned int dim_y_block = maxThreadsPerBlock/dim_x_block; unsigned int dim_x_grid = Nsec; unsigned int dim_y_grid = Ntheta; unsigned int dim_z_grid = (Nrho/dim_y_block); dim3 grid(dim_x_grid, dim_y_grid, dim_z_grid); dim3 block(dim_x_block, dim_y_block); start_time(); hipLaunchKernelGGL(( findRelativeMax), dim3(grid), dim3(block), 0, 0, dev_accMat, dev_indexOutput, NMrel); timing[3] = stop_time("Max. Relative"); size_t block_shMemsize = dim_x_block * dim_y_block * sizeof(int); //block_shMemsize *= OUT_VIEW_FRAME; //add more cells to each block shared-memory bank cout << "sh memsize " << block_shMemsize << endl; checkCudaErrors(hipMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(hipMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); hipLaunchKernelGGL(( findRelativeMax_withShared) , dim3(grid), dim3(block), block_shMemsize, 0, dev_accMat, dev_indexOutput, NMrel); start_time(); #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipMemcpy((void *) host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), hipMemcpyDeviceToHost)); #else checkCudaErrors(hipMemcpy((void *) &host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), hipMemcpyDeviceToHost)); #endif #endif #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ host_NMrel = *NMrel; }else{ #endif checkCudaErrors(hipMemcpy((void *) &host_NMrel, NMrel, (sizeof(int)), hipMemcpyDeviceToHost)); #ifdef CUDA_MANAGED_TRANSFER } #endif timing[4] = stop_time("Copy results DtoH"); #ifdef VERBOSE_DUMP cout << "NMrel from GPU "<< host_NMrel << endl; unsigned int ntracks = 0; /*for(unsigned int i = 0; ((i < (Nsec * Ntheta * Nphi * Nrho)) && (ntracks < host_NMrel)); i++){ #ifndef CUDA_MANAGED_TRANSFER if(host_out_tracks[i].acc > -1){ cout << "track " << ntracks << " host_out_tracks value = " << host_out_tracks[i].acc << " [" << i << "]" << endl; ntracks++; } #else if(dev_indexOutput[i].acc > -1){ cout << "track " << ntracks << " dev_indexOutput value = " << dev_indexOutput[i].acc << " [" << i << "]" << endl; ntracks++; } #endif }*/ #endif //free mem checkCudaErrors(hipFree(dev_indexOutput)); checkCudaErrors(hipFree(NMrel)); //print timing results with this format: // NHIT HtoD_input MEMSET_cumulative VOTE MAX_REL DtoH_output cout << N_HITS << " " << timing[0] << " " << timing[1] << " " << timing[2] << " " << timing[3] << " " << timing[4] << 
endl; #else #define SET_GRID_DIM(npoints, threadsPerBlock) ceil((npoints+((threadsPerBlock)-1))/(threadsPerBlock)) unsigned int half_grid = SET_GRID_DIM((Nsec*Ntheta*Nphi*Nrho), maxThreadsPerBlock)/2; dim3 grid(half_grid, 2); unsigned int n_blocks = half_grid * 2; int * dev_maxBlockOutput; checkCudaErrors(hipMalloc((void **) &dev_maxBlockOutput, (sizeof(int) * n_blocks))); int * dev_maxRelOutput; checkCudaErrors(hipMalloc((void **) &dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)))); hipLaunchKernelGGL(( reduceParallelMax), dim3(grid), dim3(maxThreadsPerBlock), 2*(maxThreadsPerBlock*sizeof(int)), 0, dev_accMat, dev_maxBlockOutput, dev_maxRelOutput, (Nsec*Ntheta*Nphi*Nrho)); int *host_maxBlockOutput = (int *) malloc((sizeof(int)* n_blocks)); checkCudaErrors(hipMemcpy(host_maxBlockOutput, dev_maxBlockOutput, (sizeof(int) * n_blocks), hipMemcpyDeviceToHost)); int *host_maxRelOutput = (int *) malloc((sizeof(int)* (Nsec*Ntheta*Nphi*Nrho))); checkCudaErrors(hipMemcpy(host_maxRelOutput, dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)), hipMemcpyDeviceToHost)); unsigned int debug = 0; for(unsigned int i = 0; i < n_blocks; i++){ if(host_maxBlockOutput[i] != 0){ cout << "block " << i << " max: " << host_maxBlockOutput[i] << " [" << i*maxThreadsPerBlock << "]" << endl; host_NMrel++; } unsigned int found = 0; for(unsigned int y = 0; y < maxThreadsPerBlock; y++){ unsigned int globalIndex = (y+(i*maxThreadsPerBlock)); if((host_maxRelOutput[globalIndex] != 0)) { cout << "out["<< globalIndex << "]="<< host_maxRelOutput[globalIndex]<< " "; found++; debug++; } } if(found > 0) cout << " (block "<< i << ")" << endl << endl; } /*for(unsigned int i = 0; i < (Nsec*Ntheta*Nphi*Nrho); i += maxThreadsPerBlock){ if(host_maxBlockOutput[i] != 0) cout << "block" << i/maxThreadsPerBlock << " max: " << host_maxBlockOutput[i] << " [" << i << "]" << endl; unsigned int found = 0; for(unsigned int y = 0; y < (maxThreadsPerBlock); y++){ // check relative maxima if((host_maxRelOutput[i+y] != 0)){ cout << "out["<< i+y << "]="<< host_maxRelOutput[i+y]<< " "; found++; host_NMrel++;} } if(found > 0) cout << endl << endl; }*/ cout << "NMrel from GPU "<< host_NMrel << " " << debug << endl; hipFree(dev_maxBlockOutput); hipFree(dev_maxRelOutput); free(host_maxBlockOutput); free(host_maxRelOutput); #endif host_NMrel = 0; int accumax = -1; int iphiMax = 0; int irhoMax = 0; int ithMax = 0; int isecMax = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 1; iphi < Nphi-1; iphi++){ for(unsigned int irho = 1; irho < Nrho-1; irho++){ float acc=acc_Mat[isec][ith][irho][iphi]; if (acc >= ac_soglia){ if (acc > accumax){ accumax=acc; } /*if (acc>acc_Mat[isec][ith-1][iphi][irho] && acc >= acc_Mat[isec][ith+1][iphi][irho]){ if (acc>acc_Mat[isec][ith][iphi-1][irho-1] && acc >= acc_Mat[isec][ith][iphi-1][irho+1]){ //TODO: chiedi a Lorenzo perch [iphi+1][irho+1] invece di [iphi-1][irho+1] if (acc>acc_Mat[isec][ith][iphi][irho-1] && acc >= acc_Mat[isec][ith][iphi][irho+1]){ if (acc>acc_Mat[isec][ith][iphi+1][irho-1] && acc >= acc_Mat[isec][ith][iphi+1][irho+1]){*/ if(acc > acc_Mat[isec][ith][irho-1][iphi] && acc >= acc_Mat[isec][ith][irho+1][iphi]){ if(acc > acc_Mat[isec][ith][irho][iphi-1] && acc >= acc_Mat[isec][ith][irho][iphi+1]){ //if (acc>=acc_Mat[isec][ith][irho][iphi+1] ){ accumax = acc_Mat[isec][ith][irho][iphi+1]; //Max_rel[isec][ith][irho][iphi+1]=1; host_NMrel++; ithMax=ith; irhoMax=irho; iphiMax=iphi; isecMax=isec+1; float 
t_th=(thetamin+ithMax*dtheta)*360.f/M_PI; float t_rho=rhomin+irhoMax*drho; float t_phi=phimin+iphiMax*dphi; //float q=t_rho/sin(t_phi); //float xm=-1/tan(t_phi); //cout << acc <<" "<< t_rho <<" "<< t_phi << " " << isecMax << endl; //} //} //} } } } } } } } #ifdef VERBOSE_DUMP cout << "NMrel from CPU "<< host_NMrel << endl; #endif checkCudaErrors(hipFree(dev_accMat)); } #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(hipHostFree(host_out_tracks)); #endif #endif return 0; } /***************************** * file opener *****************************/ void read_inputFile(string file_path, unsigned int num_hits) { ifstream input_f; string line; string value; stringstream ss; unsigned int val_iter; unsigned int line_read = 0; input_f.open(file_path.c_str()); if (input_f.is_open()) { while ( getline (input_f,line) && (line_read < num_hits) ) { val_iter = 0; ss.str(line); //prendiamo dati direttamente dal file ASCII in input while(ss >> value){ //i valori che ci interessano sono X, Y e Z if (val_iter == 0) x_values.push_back(atof(value.c_str())); else if (val_iter == 1) y_values.push_back(atof(value.c_str())); else if (val_iter == 2) z_values.push_back(atof(value.c_str())); val_iter++; } ss.clear(); line_read++; } input_f.close(); } }
8749f669dee85ed8ac23613b1e2746e2f9cfc9e6.cu
// // ht_helix.cpp // // // Created by Lorenzo Rinaldi on 29/04/14. // // // compile: // nvcc -I/usr/local/cuda-5.5/samples/common/inc -I/usr/local/cuda-5.5/targets/x86_64-linux/include -gencode arch=compute_20,code=sm_21 -o ht_rhophi ht_rhophi.cu //NOTE: INVERTITE DIMENSIONI NRHO-NPHI PER ACCESSO MATRICE #include <cuda_runtime.h> // includes, project #include <helper_cuda.h> #include <helper_functions.h> // helper utility functions #include "simpleIndexing.cu" #include <string.h> #include <cmath> #include <algorithm> #include <vector> #include <iostream> #include <fstream> #include <sstream> #include <unistd.h> using namespace std; #define NHMAX 300 #define Nsec 4 // Numero settori in piano trasverso #define Ntheta 16 // Numero settori in piano longitudinale #define Nphi 1024 // Numero bin angolo polare #define Nrho 1024 // Numero bin distanza radiale #define rhomin 500.f // mm #define rhomax 100000.f // mm #define phimin 0.f // rad #define phimax 2*M_PI // rad #define thetamin 0.f // rad #define thetamax M_PI // rad #define ac_soglia 4 // soglia nella matrice di accumulazione /* --- DEFINE TO ALTER EXECUTION --- */ //#define PARALLEL_REDUX_MAX //NOTE: still wrong!! do not use it //#define VERBOSE_DUMP #define CUDA_MALLOCHOST_OUTPUT //#define CUDA_MANAGED_TRANSFER #define max_tracks_out 100 int acc_Mat [ Nsec ][ Ntheta ][ Nrho ] [Nphi ]; //int Max_rel [ Nsec ][ Ntheta ][Nphi ] [Nrho ]; int debug_accMat[ Nsec ][ Ntheta ][ Nrho ] [ Nphi ]; float dtheta= M_PI/Ntheta; float drho= (rhomax-rhomin)/Nrho; float dphi= (phimax-phimin)/Nphi; vector<float> x_values; vector<float> y_values; vector<float> z_values; #define OUT_VIEW_FRAME 3; #ifndef PARALLEL_REDUX_MAX struct track_param{ int acc; /*unsigned int isec; unsigned int ith; unsigned int iphi; unsigned int irho;*/ }; #ifndef CUDA_MALLOCHOST_OUTPUT struct track_param host_out_tracks[ Nsec * Ntheta * Nrho * Nphi ]; #endif #endif //lock definition #ifndef __LOCK_H__ #define __LOCK_H__ struct Lock { int *mutex; Lock( void ) { cudaMalloc( (void**)&mutex, sizeof(int) ) ; cudaMemset( mutex, 0, sizeof(int) ); } ~Lock( void ) { cudaFree( mutex ); } __device__ void lock( void ) { while( atomicCAS( mutex, 0, 1 ) != 0 ); } __device__ void unlock( void ) { atomicExch( mutex, 0 ); } }; #endif //end lock void read_inputFile(string file_path, unsigned int num_hits); // CUDA timer macros cudaEvent_t c_start, c_stop; inline void start_time() { cudaEventCreate(&c_start); cudaEventCreate(&c_stop); cudaEventRecord(c_start, 0); } inline float stop_time(const char *msg) { float elapsedTime = 0; cudaEventRecord(c_stop, 0); cudaEventSynchronize(c_stop); cudaEventElapsedTime(&elapsedTime, c_start, c_stop); //printf("Time to %s: %.3f ms\n", msg, elapsedTime); cudaEventDestroy(c_start); cudaEventDestroy(c_stop); return elapsedTime; } //#define floatToInt(x) (((x) >= 0) ? 
(int)((x) + 0.5) : (int)((x) - 0.5)) #define get4DIndex(s,t,r,p) ((s)*(Ntheta*Nrho*Nphi))+(((t)*Nrho*Nphi) +(((r)*Nphi)+(p))) #define get2DIndex(r,p) (((r)*Nphi)+(p)) __global__ void voteHoughSpace(float *dev_x_values, float *dev_y_values, float *dev_z_values, int *dev_accMat, float dtheta, float drho, float dphi){ __shared__ float x_val; __shared__ float y_val; __shared__ float z_val; if(threadIdx.x == 0){ x_val = dev_x_values[blockIdx.x]; y_val = dev_y_values[blockIdx.x]; z_val = dev_z_values[blockIdx.x]; } __syncthreads(); float R2 = x_val*x_val + y_val*y_val; float theta=acos(z_val/sqrt(R2+z_val*z_val)); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_val,x_val); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor((sec/2/M_PI*Nsec)); int iphi = threadIdx.x; float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_val*cos(phi)+y_val*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); int accu_index = get4DIndex(isec, ith, irho, iphi);//(isec*(Ntheta*Nphi*Nrho))+((ith*Nphi*Nrho) +((iphi*Nrho)+irho)); if (rho<=rhomax && rho>rhomin) { atomicAdd(&(dev_accMat[accu_index]),1); } } #ifndef PARALLEL_REDUX_MAX __global__ void findRelativeMax_withShared(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ extern __shared__ int SH_local_accMat[]; //check if it is a local maxima by verifying that it is greater then (>=) its neighboors /*unsigned int index_Y0 = get2DIndex(0,iphi); unsigned int index_Y1 = get2DIndex(1,iphi); unsigned int index_Y2 = get2DIndex(2,iphi);*/ unsigned int index_Y1 = iphi; SH_local_accMat[index_Y1] = dev_accMat[get4DIndex(isec, ith, irho, iphi)]; //save into shared memory this thread accumulator //In order to avoid oppressing global memory access, we delegate upper and lower rows, irho+1 and irho-1, loading into shared memory //only to those threads which passes the first "cut" on threshold //__syncthreads(); //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ if (SH_local_accMat[index_Y1] >= ac_soglia){ //we're sure that each thread has its own acc saved in shared memory /*SH_local_accMat[index_Y0] = dev_accMat[get4DIndex(isec, ith, irho-1, iphi)]; SH_local_accMat[index_Y2] = dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]; __syncthreads();*/ //NOTE: since we only access once (irho-1,iphi) and (irho+1,iphi) for this computation, and there isn't any reuse for other //threads of these informations, we don't need to put the other two rows in shared memory //(x,y) > (x,y-1) && (x,y) >= (x,y+1) /*if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y0] && SH_local_accMat[index_Y1] >= SH_local_accMat[index_Y2]){*/ if(SH_local_accMat[index_Y1] > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && SH_local_accMat[index_Y1] >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ //__syncthreads(); //this is just to make sure that all threads had written in the shared memory, before reading each other values //(x,y) > (x-1, y) && (x,y) >= (x+1, y) if(SH_local_accMat[index_Y1] > SH_local_accMat[index_Y1-1] && SH_local_accMat[index_Y1] >= 
SH_local_accMat[index_Y1+1]){ /*atomicAdd(&local_NMrel, 1);*/ //NOTE atomic op on shared memory are SLOWER than global memory, because they're implemented in software atomicAdd(NMrel, 1); dev_output[globalIndex].acc = SH_local_accMat[index_Y1]; //dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ } } } } } __global__ void findRelativeMax(int *dev_accMat, struct track_param *dev_output, unsigned int *NMrel){ unsigned int isec = blockIdx.x; unsigned int ith = blockIdx.y; unsigned int iphi = threadIdx.x; unsigned int irho = blockIdx.z; unsigned int globalIndex = getGlobalIdx_2D_2D(); //unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; /*__shared__ unsigned int local_NMrel; if(threadIdx.x == 0) local_NMrel = 0; __syncthreads();*/ //check if it is a local maxima by verifying that it is greater then (>=) its neighboors //we must check from isec >= 0, ith >= 0, iphi >= 1, irho >= 1 if(((iphi > 0) && (irho > 0)) && ((iphi < Nphi-1) && (irho < Nrho-1))){ //each thread is assigned to one point of the accum. matrix: int acc= dev_accMat[get4DIndex(isec, ith, irho, iphi)]; if (acc >= ac_soglia){ if(acc > dev_accMat[get4DIndex(isec, ith, irho-1, iphi)] && acc >= dev_accMat[get4DIndex(isec, ith, irho+1, iphi)]){ if(acc > dev_accMat[get4DIndex(isec, ith, irho, iphi-1)] && acc >= dev_accMat[get4DIndex(isec, ith, irho, iphi+1)]){ /*atomicAdd(&local_NMrel, 1); if(threadIdx.x == 0){ mutex.lock(); *NMrel += local_NMrel; mutex.unlock(); }*/ atomicAdd(NMrel, 1); //mutex.lock(); dev_output[globalIndex].acc = acc; /*dev_output[globalIndex].isec = isec; dev_output[globalIndex].ith = ith; dev_output[globalIndex].iphi = iphi; dev_output[globalIndex].irho = irho;*/ //mutex.unlock(); } } } } } #else //NOTE: wrong approach to solve this problem //TODO: improve as on slides __global__ void reduceParallelMax(int *dev_accMat, int *dev_output, int *dev_maxRelOutput, unsigned int N){ extern __shared__ int sdata[]; int* max_sdata = (int *) sdata; int* relMax_sdata = (int *) &sdata[blockDim.x]; //each thread loads one element from global to shared mem unsigned int tid = threadIdx.x; //local index //unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; //global index (1D grid - 1D block) unsigned int i = getGlobalIdx_2D_1D(); if(i < N){ //check if thread is in data bounds max_sdata[tid] = dev_accMat[i]; relMax_sdata[tid] = dev_accMat[i]; __syncthreads(); //do reduction in shared memory for(unsigned int s=1; s < blockDim.x; s*=2){ if(tid % (2*s) == 0){ //it is for a different stride //atomicMax(&(max_sdata[tid]),max_sdata[tid+s]); //TODO: change without atomic max_sdata[tid] = (max_sdata[tid] > max_sdata[tid+s]) ? 
max_sdata[tid] : max_sdata[tid+s]; __syncthreads(); } __syncthreads(); } //write results (now found in the first element of the array) for this block to global memory //if(tid == 0) dev_output[blockIdx.x] = sdata[0]; if(tid == 0) dev_output[blockIdx.x] = max_sdata[0]; //at sdata[0], we found the maximum if(relMax_sdata[tid] >= ac_soglia){ dev_maxRelOutput[i] = relMax_sdata[tid]; }else{ dev_maxRelOutput[i] = 0; } } } #endif void help(char* prog) { printf("Use %s [-l #loops] [-n #hitsToRead] [-h] \n\n", prog); printf(" -l loops Number of executions (Default: 1).\n"); printf(" -n hits Number of hits to read from input file (Default: 236).\n"); printf(" -h This help.\n"); } int main(int argc, char* argv[]){ unsigned int N_LOOPS = 1; unsigned int N_HITS = 236; int c; //getting command line options while ( (c = getopt(argc, argv, "l:n:h")) != -1 ) { switch(c) { case 'n': N_HITS = atoi(optarg); break; case 'l': N_LOOPS = atoi(optarg); break; case 'h': help(argv[0]); return 0; break; default: printf("Unkown option!\n"); help(argv[0]); return 0; } } int GPU_N; checkCudaErrors(cudaGetDeviceCount(&GPU_N)); cudaDeviceProp *deviceProp; deviceProp = (cudaDeviceProp *) malloc(sizeof(cudaDeviceProp)*GPU_N); for(unsigned int i = 0; i < GPU_N; i++){ checkCudaErrors(cudaGetDeviceProperties(&deviceProp[i], i)); cout << deviceProp[i].name << endl; } #ifndef CUDA_MANAGED_TRANSFER struct track_param *host_out_tracks; start_time(); #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaMallocHost((void **) &host_out_tracks, (sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)))); #else host_out_tracks = malloc(sizeof(struct track_param)*(Nsec * Ntheta * Nrho * Nphi)); #endif float init_outputMatrix = stop_time("init output matrix with cudaMallocHost"); cout << "time to init output matrix (once): " << init_outputMatrix << endl; #endif int *dev_accMat; float *dev_x_values; float *dev_y_values; float *dev_z_values; float *x_values_temp; float *y_values_temp; float *z_values_temp; //executions loop for(unsigned int loop = 0; loop < N_LOOPS; loop++){ float timing[5]; //float R = 0.f; // Inizializzo a zero le matrici memset(&acc_Mat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); memset(&debug_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)) ); //memset(&Max_rel, 0, (sizeof(int)*(Nsec*Ntheta*Nphi*Nrho)) ); //alloc accumulator matrix on GPU start_time(); checkCudaErrors(cudaMalloc((void **) &dev_accMat, (sizeof(int)* (Nsec * Ntheta * Nrho*Nphi)) )); checkCudaErrors(cudaMemset(dev_accMat, 0, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)))); timing[1] = stop_time("malloc dev_accMat and memset(0)"); //riempi i valori dentro x_values , y_values , z_values read_inputFile("hits-5000.txt", N_HITS); // read_inputFile("../datafiles/hits-1.txt"); #ifdef CUDA_MANAGED_TRANSFER int cudaVer = 0; cudaRuntimeGetVersion(&cudaVer); if(cudaVer >= 6000){ start_time(); checkCudaErrors(cudaMallocManaged(&dev_x_values,sizeof(float)*x_values.size())); checkCudaErrors(cudaMallocManaged(&dev_y_values,sizeof(float)*y_values.size())); checkCudaErrors(cudaMallocManaged(&dev_z_values,sizeof(float)*z_values.size())); for(unsigned int i = 0; i < x_values.size(); i++){ dev_x_values[i] = x_values.at(i); dev_y_values[i] = y_values.at(i); dev_z_values[i] = z_values.at(i); } timing[0] = stop_time("Input malloc and copy HtoD"); }else{ #endif x_values_temp = (float*) malloc(sizeof(float)*x_values.size()); y_values_temp = (float*) malloc(sizeof(float)*y_values.size()); z_values_temp = (float*) malloc( sizeof(float)*z_values.size()); for(unsigned int i = 0; i < 
x_values.size(); i++){ x_values_temp[i] = x_values.at(i); y_values_temp[i] = y_values.at(i); z_values_temp[i] = z_values.at(i); } start_time(); checkCudaErrors(cudaMalloc((void **) &dev_x_values, sizeof(float)*x_values.size())); checkCudaErrors(cudaMalloc((void **) &dev_y_values, sizeof(float)*y_values.size())); checkCudaErrors(cudaMalloc((void **) &dev_z_values, sizeof(float)*z_values.size())); checkCudaErrors(cudaMemcpy(dev_x_values, x_values_temp, sizeof(float)*x_values.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_y_values, y_values_temp, sizeof(float)*y_values.size(), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_z_values, z_values_temp, sizeof(float)*z_values.size(), cudaMemcpyHostToDevice)); timing[0] = stop_time("Input malloc and copy HtoD"); #ifdef CUDA_MANAGED_TRANSFER } #endif start_time(); voteHoughSpace <<<x_values.size(), Nphi>>> (dev_x_values, dev_y_values, dev_z_values, dev_accMat, dtheta, drho, dphi); //assumes that Nphi == Nrho timing[2] = stop_time("Vote"); //#ifdef VERBOSE_DUMP checkCudaErrors(cudaMemcpy((void *) &debug_accMat, dev_accMat, (sizeof(int)*(Nsec*Ntheta*Nrho*Nphi)), cudaMemcpyDeviceToHost)); //#endif // conteggia gli elementi della matrice int total=0; for (int r=0; r<Nsec*Ntheta*Nphi*Nrho; r++){ cout << debug_accMat[r] << endl; } //CPU execution for(unsigned int i = 0; i < x_values.size(); i++){ //cout << x_values.at(i) << " - "; //cout << y_values.at(i) << endl; float R2=x_values.at(i)*x_values.at(i)+y_values.at(i)*y_values.at(i); float theta=acos(z_values.at(i)/sqrt(R2+z_values.at(i)*z_values.at(i))); //int ith=(int) (theta/dtheta)+0.5f; int ith = floor((theta/dtheta)); float sec=atan2(y_values.at(i),x_values.at(i)); if (sec<0.f) { sec=2*M_PI+sec; } //int isec=int(sec/2/M_PI*Nsec); int isec = floor(sec/2/M_PI*Nsec); for(int iphi = 0; iphi < Nphi; iphi++){ float phi=phimin+iphi*dphi; float rho=R2/2.f/(x_values.at(i)*cos(phi)+y_values.at(i)*sin(phi)); //int irho=(int)((rho-rhomin)/drho)+0.5f; int irho = floor(((rho-rhomin)/drho)); if (rho<=rhomax && rho>rhomin) { acc_Mat[isec][ith][irho][iphi]++; } } } #ifdef VERBOSE_DUMP //check unsigned int corretto = 0; unsigned int errore = 0; unsigned int letto = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[isec][ith][irho][iphi] != debug_accMat[isec][ith][irho][iphi]){ printf("diverso acc_Mat[%d][%d][%d][%d] %d - debug_accMat[%d][%d][%d][%d] %d \n", isec, ith, irho, iphi, acc_Mat[isec][ith][irho][iphi], isec, ith, irho, iphi, debug_accMat[isec][ith][irho][iphi]); errore++; }else corretto++; letto++; } } } } printf("corretti %d sbaglati %d; letti %d\n", corretto, errore, letto); /*for(unsigned int i = 0; i < Nsec; i++){ cout << "sec " << i << ":" << endl; for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 0; iphi < Nphi; iphi++){ for(unsigned int irho = 0; irho < Nrho; irho++){ if(acc_Mat[i][ith][iphi][irho] != 0) cout << "accMat[get3DIndex(" << ith << ", " << iphi << ", " << irho << ") = " << acc_Mat[i][ith][iphi][irho] << endl; } } } }*/ #endif checkCudaErrors(cudaFree(dev_x_values)); checkCudaErrors(cudaFree(dev_y_values)); checkCudaErrors(cudaFree(dev_z_values)); #ifndef CUDA_MANAGED_TRANSFER free(x_values_temp); free(y_values_temp); free(z_values_temp); #endif x_values.clear(); y_values.clear(); z_values.clear(); //trova il massimo relativo unsigned int host_NMrel = 0; // --- Prendiamo le informazioni 
specifiche della GPU per la divisione del lavoro appropriata unsigned int maxThreadsPerBlock = deviceProp[0].maxThreadsPerBlock; #ifndef PARALLEL_REDUX_MAX struct track_param *dev_indexOutput; Lock my_lock; unsigned int *NMrel; start_time(); #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ checkCudaErrors(cudaMallocManaged(&dev_indexOutput,(sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi)) )); checkCudaErrors(cudaMallocManaged(&NMrel,sizeof(unsigned int) )); *NMrel = 0; }else{ #endif checkCudaErrors(cudaMalloc((void **) &NMrel, (sizeof(unsigned int)))); checkCudaErrors(cudaMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(cudaMalloc((void **) &dev_indexOutput, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )) )); #ifdef CUDA_MANAGED_TRANSFER } #endif checkCudaErrors(cudaMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); timing[1] += stop_time("malloc dev_indexOutput+NMrel and memset"); // dividiamo adeguatamente il lavoro // in base al numero massimo di thread disponibili in un singolo thread-block unsigned int dim_x_block = Nphi; unsigned int dim_y_block = maxThreadsPerBlock/dim_x_block; unsigned int dim_x_grid = Nsec; unsigned int dim_y_grid = Ntheta; unsigned int dim_z_grid = (Nrho/dim_y_block); dim3 grid(dim_x_grid, dim_y_grid, dim_z_grid); dim3 block(dim_x_block, dim_y_block); start_time(); findRelativeMax<<<grid, block>>>(dev_accMat, dev_indexOutput, NMrel); timing[3] = stop_time("Max. Relative"); size_t block_shMemsize = dim_x_block * dim_y_block * sizeof(int); //block_shMemsize *= OUT_VIEW_FRAME; //add more cells to each block shared-memory bank cout << "sh memsize " << block_shMemsize << endl; checkCudaErrors(cudaMemset(NMrel, 0, sizeof(unsigned int))); checkCudaErrors(cudaMemset(dev_indexOutput, -1, (sizeof(struct track_param)* (Nsec * Ntheta * Nrho * Nphi )))); findRelativeMax_withShared <<<grid, block, block_shMemsize>>> (dev_accMat, dev_indexOutput, NMrel); start_time(); #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaMemcpy((void *) host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), cudaMemcpyDeviceToHost)); #else checkCudaErrors(cudaMemcpy((void *) &host_out_tracks, dev_indexOutput, (sizeof(int)* (Nsec * Ntheta * Nrho* Nphi)), cudaMemcpyDeviceToHost)); #endif #endif #ifdef CUDA_MANAGED_TRANSFER if(cudaVer >= 6000){ host_NMrel = *NMrel; }else{ #endif checkCudaErrors(cudaMemcpy((void *) &host_NMrel, NMrel, (sizeof(int)), cudaMemcpyDeviceToHost)); #ifdef CUDA_MANAGED_TRANSFER } #endif timing[4] = stop_time("Copy results DtoH"); #ifdef VERBOSE_DUMP cout << "NMrel from GPU "<< host_NMrel << endl; unsigned int ntracks = 0; /*for(unsigned int i = 0; ((i < (Nsec * Ntheta * Nphi * Nrho)) && (ntracks < host_NMrel)); i++){ #ifndef CUDA_MANAGED_TRANSFER if(host_out_tracks[i].acc > -1){ cout << "track " << ntracks << " host_out_tracks value = " << host_out_tracks[i].acc << " [" << i << "]" << endl; ntracks++; } #else if(dev_indexOutput[i].acc > -1){ cout << "track " << ntracks << " dev_indexOutput value = " << dev_indexOutput[i].acc << " [" << i << "]" << endl; ntracks++; } #endif }*/ #endif //free mem checkCudaErrors(cudaFree(dev_indexOutput)); checkCudaErrors(cudaFree(NMrel)); //print timing results with this format: // NHIT HtoD_input MEMSET_cumulative VOTE MAX_REL DtoH_output cout << N_HITS << " " << timing[0] << " " << timing[1] << " " << timing[2] << " " << timing[3] << " " << timing[4] << endl; #else #define SET_GRID_DIM(npoints, threadsPerBlock) 
ceil((npoints+((threadsPerBlock)-1))/(threadsPerBlock)) unsigned int half_grid = SET_GRID_DIM((Nsec*Ntheta*Nphi*Nrho), maxThreadsPerBlock)/2; dim3 grid(half_grid, 2); unsigned int n_blocks = half_grid * 2; int * dev_maxBlockOutput; checkCudaErrors(cudaMalloc((void **) &dev_maxBlockOutput, (sizeof(int) * n_blocks))); int * dev_maxRelOutput; checkCudaErrors(cudaMalloc((void **) &dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)))); reduceParallelMax<<<grid, maxThreadsPerBlock, 2*(maxThreadsPerBlock*sizeof(int))>>>(dev_accMat, dev_maxBlockOutput, dev_maxRelOutput, (Nsec*Ntheta*Nphi*Nrho)); int *host_maxBlockOutput = (int *) malloc((sizeof(int)* n_blocks)); checkCudaErrors(cudaMemcpy(host_maxBlockOutput, dev_maxBlockOutput, (sizeof(int) * n_blocks), cudaMemcpyDeviceToHost)); int *host_maxRelOutput = (int *) malloc((sizeof(int)* (Nsec*Ntheta*Nphi*Nrho))); checkCudaErrors(cudaMemcpy(host_maxRelOutput, dev_maxRelOutput, (sizeof(int) * (Nsec*Ntheta*Nphi*Nrho)), cudaMemcpyDeviceToHost)); unsigned int debug = 0; for(unsigned int i = 0; i < n_blocks; i++){ if(host_maxBlockOutput[i] != 0){ cout << "block " << i << " max: " << host_maxBlockOutput[i] << " [" << i*maxThreadsPerBlock << "]" << endl; host_NMrel++; } unsigned int found = 0; for(unsigned int y = 0; y < maxThreadsPerBlock; y++){ unsigned int globalIndex = (y+(i*maxThreadsPerBlock)); if((host_maxRelOutput[globalIndex] != 0)) { cout << "out["<< globalIndex << "]="<< host_maxRelOutput[globalIndex]<< " "; found++; debug++; } } if(found > 0) cout << " (block "<< i << ")" << endl << endl; } /*for(unsigned int i = 0; i < (Nsec*Ntheta*Nphi*Nrho); i += maxThreadsPerBlock){ if(host_maxBlockOutput[i] != 0) cout << "block" << i/maxThreadsPerBlock << " max: " << host_maxBlockOutput[i] << " [" << i << "]" << endl; unsigned int found = 0; for(unsigned int y = 0; y < (maxThreadsPerBlock); y++){ // check relative maxima if((host_maxRelOutput[i+y] != 0)){ cout << "out["<< i+y << "]="<< host_maxRelOutput[i+y]<< " "; found++; host_NMrel++;} } if(found > 0) cout << endl << endl; }*/ cout << "NMrel from GPU "<< host_NMrel << " " << debug << endl; cudaFree(dev_maxBlockOutput); cudaFree(dev_maxRelOutput); free(host_maxBlockOutput); free(host_maxRelOutput); #endif host_NMrel = 0; int accumax = -1; int iphiMax = 0; int irhoMax = 0; int ithMax = 0; int isecMax = 0; for(unsigned int isec = 0; isec < Nsec; isec++){ for(unsigned int ith = 0; ith < Ntheta; ith++){ for(unsigned int iphi = 1; iphi < Nphi-1; iphi++){ for(unsigned int irho = 1; irho < Nrho-1; irho++){ float acc=acc_Mat[isec][ith][irho][iphi]; if (acc >= ac_soglia){ if (acc > accumax){ accumax=acc; } /*if (acc>acc_Mat[isec][ith-1][iphi][irho] && acc >= acc_Mat[isec][ith+1][iphi][irho]){ if (acc>acc_Mat[isec][ith][iphi-1][irho-1] && acc >= acc_Mat[isec][ith][iphi-1][irho+1]){ //TODO: chiedi a Lorenzo perché [iphi+1][irho+1] invece di [iphi-1][irho+1] if (acc>acc_Mat[isec][ith][iphi][irho-1] && acc >= acc_Mat[isec][ith][iphi][irho+1]){ if (acc>acc_Mat[isec][ith][iphi+1][irho-1] && acc >= acc_Mat[isec][ith][iphi+1][irho+1]){*/ if(acc > acc_Mat[isec][ith][irho-1][iphi] && acc >= acc_Mat[isec][ith][irho+1][iphi]){ if(acc > acc_Mat[isec][ith][irho][iphi-1] && acc >= acc_Mat[isec][ith][irho][iphi+1]){ //if (acc>=acc_Mat[isec][ith][irho][iphi+1] ){ accumax = acc_Mat[isec][ith][irho][iphi+1]; //Max_rel[isec][ith][irho][iphi+1]=1; host_NMrel++; ithMax=ith; irhoMax=irho; iphiMax=iphi; isecMax=isec+1; float t_th=(thetamin+ithMax*dtheta)*360.f/M_PI; float t_rho=rhomin+irhoMax*drho; float t_phi=phimin+iphiMax*dphi; 
//float q=t_rho/sin(t_phi); //float xm=-1/tan(t_phi); //cout << acc <<" "<< t_rho <<" "<< t_phi << " " << isecMax << endl; //} //} //} } } } } } } } #ifdef VERBOSE_DUMP cout << "NMrel from CPU "<< host_NMrel << endl; #endif checkCudaErrors(cudaFree(dev_accMat)); } #ifndef CUDA_MANAGED_TRANSFER #ifdef CUDA_MALLOCHOST_OUTPUT checkCudaErrors(cudaFreeHost(host_out_tracks)); #endif #endif return 0; } /***************************** * file opener *****************************/ void read_inputFile(string file_path, unsigned int num_hits) { ifstream input_f; string line; string value; stringstream ss; unsigned int val_iter; unsigned int line_read = 0; input_f.open(file_path.c_str()); if (input_f.is_open()) { while ( getline (input_f,line) && (line_read < num_hits) ) { val_iter = 0; ss.str(line); //prendiamo dati direttamente dal file ASCII in input while(ss >> value){ //i valori che ci interessano sono X, Y e Z if (val_iter == 0) x_values.push_back(atof(value.c_str())); else if (val_iter == 1) y_values.push_back(atof(value.c_str())); else if (val_iter == 2) z_values.push_back(atof(value.c_str())); val_iter++; } ss.clear(); line_read++; } input_f.close(); } }
LinearBinning.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Maps each element of vec to a linear bin index and counts bin occupancy.
__global__ void LinearBinning(float *vec, int *bin, int *bin_counters, const int num_bins, const int MaxBin, const int n, const float slope, const float intercept)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex < n) {
        // Read the element only after the bounds check so out-of-range threads never touch memory.
        float temp = fabsf(vec[xIndex]);
        if (temp > (intercept * .000001f)) {
            // Linear mapping of the magnitude to a bin index, clamped at 0.
            bin[xIndex] = max(0.0f, slope * (intercept - temp));
            if (bin[xIndex] < MaxBin)
                atomicAdd(bin_counters + bin[xIndex], 1);
        } else {
            // Values at or below the cutoff go to an overflow bin past the counted range.
            bin[xIndex] = slope * intercept + 1.0f;
        }
    }
}
LinearBinning.cu
#include "includes.h"

// Maps each element of vec to a linear bin index and counts bin occupancy.
__global__ void LinearBinning(float *vec, int *bin, int *bin_counters, const int num_bins, const int MaxBin, const int n, const float slope, const float intercept)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (xIndex < n) {
        // Read the element only after the bounds check so out-of-range threads never touch memory.
        float temp = fabsf(vec[xIndex]);
        if (temp > (intercept * .000001f)) {
            // Linear mapping of the magnitude to a bin index, clamped at 0.
            bin[xIndex] = max(0.0f, slope * (intercept - temp));
            if (bin[xIndex] < MaxBin)
                atomicAdd(bin_counters + bin[xIndex], 1);
        } else {
            // Values at or below the cutoff go to an overflow bin past the counted range.
            bin[xIndex] = slope * intercept + 1.0f;
        }
    }
}
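LinearBinning.cu ships only the kernel, so a host-side driver has to supply the buffers and launch configuration itself. The sketch below is an assumed minimal harness, not part of the original file: the array size, slope and intercept values are illustrative, and it is meant to be compiled and linked together with LinearBinning.cu (which provides the kernel definition).

// Hypothetical host driver for the LinearBinning kernel above.
// All sizes and binning parameters are illustrative assumptions.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void LinearBinning(float *vec, int *bin, int *bin_counters, const int num_bins, const int MaxBin, const int n, const float slope, const float intercept);

int main() {
    const int n = 1024, num_bins = 16, MaxBin = num_bins;
    const float intercept = 1.0f;
    const float slope = num_bins / intercept;   // spread magnitudes in (0, intercept] over num_bins bins

    std::vector<float> h_vec(n);
    for (int i = 0; i < n; ++i) h_vec[i] = (float)i / n;

    float *d_vec; int *d_bin, *d_counters;
    cudaMalloc(&d_vec, n * sizeof(float));
    cudaMalloc(&d_bin, n * sizeof(int));
    cudaMalloc(&d_counters, num_bins * sizeof(int));
    cudaMemcpy(d_vec, h_vec.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_counters, 0, num_bins * sizeof(int));

    const int block = 256;
    const int grid = (n + block - 1) / block;   // round up so every element gets a thread
    LinearBinning<<<grid, block>>>(d_vec, d_bin, d_counters, num_bins, MaxBin, n, slope, intercept);
    cudaDeviceSynchronize();

    std::vector<int> counters(num_bins);
    cudaMemcpy(counters.data(), d_counters, num_bins * sizeof(int), cudaMemcpyDeviceToHost);
    for (int b = 0; b < num_bins; ++b) printf("bin %d: %d\n", b, counters[b]);

    cudaFree(d_vec); cudaFree(d_bin); cudaFree(d_counters);
    return 0;
}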
e33800f3a963ae447410a08cc246e1b1d0de41c7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <ctime>
using namespace std;

// Tree reduction that leaves the minimum of a[0..2*blockDim.x-1] in a[0].
// Assumes the input length is a power of two and the kernel runs in a single block.
__global__ void mini(int *a)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int n_thread = blockDim.x;
    int f, s;
    while (n_thread > 0) {
        if (tid < n_thread) {
            f = tid * step_size * 2;
            s = f + step_size;
            if (a[f] >= a[s])
                a[f] = a[s];
        }
        step_size <<= 1;
        n_thread >>= 1;
        __syncthreads(); // make this round's writes visible before the next round reads them
    }
}

int main()
{
    int *a, *b, size, n;
    int *d_a;
    cin >> n;
    size = n * sizeof(int);
    a = (int *)malloc(size);
    b = (int *)malloc(sizeof(int));
    hipMalloc(&d_a, size);
    for (int i = 0; i < n; i++) {
        a[i] = rand() % 100;
    }
    for (int i = 0; i < n; i++) {
        cout << a[i] << " ";
    }
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    clock_t start = clock();
    hipLaunchKernelGGL(mini, dim3(1), dim3(n / 2), 0, 0, d_a);
    hipDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the timer
    cout << "time: " << (float)(clock() - start) / CLOCKS_PER_SEC << endl;
    hipMemcpy(b, d_a, sizeof(int), hipMemcpyDeviceToHost); // only the reduced minimum in a[0] is needed
    cout << "min is :" << b[0];
    free(a);
    free(b);
    hipFree(d_a);
    return 0;
}
e33800f3a963ae447410a08cc246e1b1d0de41c7.cu
#include <iostream>
#include <cstdlib>
#include <ctime>
using namespace std;

// Tree reduction that leaves the minimum of a[0..2*blockDim.x-1] in a[0].
// Assumes the input length is a power of two and the kernel runs in a single block.
__global__ void mini(int *a)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int n_thread = blockDim.x;
    int f, s;
    while (n_thread > 0) {
        if (tid < n_thread) {
            f = tid * step_size * 2;
            s = f + step_size;
            if (a[f] >= a[s])
                a[f] = a[s];
        }
        step_size <<= 1;
        n_thread >>= 1;
        __syncthreads(); // make this round's writes visible before the next round reads them
    }
}

int main()
{
    int *a, *b, size, n;
    int *d_a;
    cin >> n;
    size = n * sizeof(int);
    a = (int *)malloc(size);
    b = (int *)malloc(sizeof(int));
    cudaMalloc(&d_a, size);
    for (int i = 0; i < n; i++) {
        a[i] = rand() % 100;
    }
    for (int i = 0; i < n; i++) {
        cout << a[i] << " ";
    }
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    clock_t start = clock();
    mini<<<1, n / 2>>>(d_a);
    cudaDeviceSynchronize(); // kernel launches are asynchronous; wait before stopping the timer
    cout << "time: " << (float)(clock() - start) / CLOCKS_PER_SEC << endl;
    cudaMemcpy(b, d_a, sizeof(int), cudaMemcpyDeviceToHost); // only the reduced minimum in a[0] is needed
    cout << "min is :" << b[0];
    free(a);
    free(b);
    cudaFree(d_a);
    return 0;
}
02c288f079abfb5789fdd6c8dc8b01917f99c35e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kCopy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *srcStart = NULL; hipMalloc(&srcStart, XSIZE*YSIZE); float *destStart = NULL; hipMalloc(&destStart, XSIZE*YSIZE); unsigned int copyWidth = 1; unsigned int jumpWidth = 1; unsigned int numElements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, srcStart,destStart,copyWidth,jumpWidth,numElements); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, srcStart,destStart,copyWidth,jumpWidth,numElements); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kCopy), dim3(gridBlock),dim3(threadBlock), 0, 0, srcStart,destStart,copyWidth,jumpWidth,numElements); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
02c288f079abfb5789fdd6c8dc8b01917f99c35e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kCopy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *srcStart = NULL; cudaMalloc(&srcStart, XSIZE*YSIZE); float *destStart = NULL; cudaMalloc(&destStart, XSIZE*YSIZE); unsigned int copyWidth = 1; unsigned int jumpWidth = 1; unsigned int numElements = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kCopy<<<gridBlock,threadBlock>>>(srcStart,destStart,copyWidth,jumpWidth,numElements); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kCopy<<<gridBlock,threadBlock>>>(srcStart,destStart,copyWidth,jumpWidth,numElements); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kCopy<<<gridBlock,threadBlock>>>(srcStart,destStart,copyWidth,jumpWidth,numElements); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
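// Sketch (not from the file pair above): the timing loop above reads steady_clock::now()
// without synchronizing after the 1000 launches, so it mostly measures enqueue overhead
// rather than kernel execution time. A minimal event-based alternative for the same launch;
// gridBlock, threadBlock, srcStart, destStart, copyWidth, jumpWidth and numElements refer
// to the harness above.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    kCopy<<<gridBlock, threadBlock>>>(srcStart, destStart, copyWidth, jumpWidth, numElements);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);                  // wait until the last launch has finished
float ms = 0.0f;
cudaEventElapsedTime(&ms, evStart, evStop);    // elapsed GPU time in milliseconds
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);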
68b722d56f179336204d0ad5c83c8c0d788c163c.hip
// !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
#include <hip/hip_runtime.h>
#define SIZE 4

__global__ void matrixMultiplicationKernel(int* A, int* B, int* C, int N) {
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;
    int tmpSum = 0;
    if (ROW < N && COL < N) {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; i++) {
            tmpSum += A[ROW * N + i] * B[i * N + COL];
        }
        C[ROW * N + COL] = tmpSum;   // write only inside the matrix bounds
    }
}

int main() {
    // I'm using vectors with 16 elements to represent the matrix (4 rows with 4 values)
    int *first  = (int*) malloc(SIZE * SIZE * sizeof(int));
    int *second = (int*) malloc(SIZE * SIZE * sizeof(int));
    int *result = (int*) malloc(SIZE * SIZE * sizeof(int));

    // Fill local data:
    for (int i = 0; i < SIZE; i++) {
        for (int j = 0; j < SIZE; j++) {
            // i*SIZE + j is the position (column j) of row i in the flattened matrix.
            first[i * SIZE + j]  = i;
            second[i * SIZE + j] = j;
        }
    }

    int *first_gpu;
    int *second_gpu;
    int *result_gpu;

    int N = SIZE;                       // matrix dimension
    int nBytes = N * N * sizeof(int);   // bytes per matrix

    printf("Allocating device memory on host..\n");
    // GPU memory allocation
    hipMalloc((void **) &first_gpu, nBytes);
    hipMalloc((void **) &second_gpu, nBytes);
    hipMalloc((void **) &result_gpu, nBytes);

    // Work definition: one thread per output element, 2-D blocks ////////
    dim3 dimBlock(SIZE, SIZE, 1);
    dim3 dimGrid((N + SIZE - 1) / SIZE, (N + SIZE - 1) / SIZE, 1);
    /////////////////////////////////////

    printf("Copying to device..\n");
    hipMemcpy(first_gpu, first, nBytes, hipMemcpyHostToDevice);
    hipMemcpy(second_gpu, second, nBytes, hipMemcpyHostToDevice);

    clock_t start_d = clock();
    printf("Doing GPU Matrix Multiplication\n");
    hipLaunchKernelGGL(matrixMultiplicationKernel, dimGrid, dimBlock, 0, 0, first_gpu, second_gpu, result_gpu, N);
    // Wait for the kernel to finish before stopping the clock
    hipDeviceSynchronize();
    clock_t end_d = clock();

    // Copying data back to host, this is a blocking call and will not start until all kernels are finished
    hipMemcpy(result, result_gpu, nBytes, hipMemcpyDeviceToHost);
    double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;
    printf("Time it took on GPU: %f\n", time_d);

    // Free GPU and host memory
    hipFree(first_gpu);
    hipFree(second_gpu);
    hipFree(result_gpu);
    free(first);
    free(second);
    free(result);
    return 0;
}
68b722d56f179336204d0ad5c83c8c0d788c163c.cu
#include "stdio.h" #include "stdlib.h" #include <cuda.h> #define SIZE 4 __global__ void matrixMultiplicationKernel(int* A, int* B, int* C, int N) { int ROW = blockIdx.y*blockDim.y+threadIdx.y; int COL = blockIdx.x*blockDim.x+threadIdx.x; int tmpSum = 0; if (ROW < N && COL < N) { // each thread computes one element of the block sub-matrix for (int i = 0; i < N; i++) { tmpSum += A[ROW * N + i] * B[i * N + COL]; } } C[ROW * N + COL] = tmpSum; } int main (){ //I'm using vectors with 16 elements to represent the matrix (4 rows with 4 values) int *first = (int*) malloc(SIZE * SIZE * sizeof(int)); int *second = (int*) malloc(SIZE * SIZE * sizeof(int)); int *result = (int*) malloc(SIZE * SIZE * sizeof(int)); //Fill local data: for (int i = 0; i < SIZE; i++){ for (int j = 0; j < SIZE; j++){ //i+j gives the position (columb) in the row, for each row. first[i+j] = i; second[i+j] = j; } } int *first_gpu; int *second_gpu; int *result_gpu; //Iterations int N=SIZE; //size of vector //Number of blocks int nBytes = N*N*sizeof(int); //Block size and number int block_size, block_no; block_size = 250; //threads per block block_no = N/block_size; printf("Allocating device memory on host..\n"); //GPU memory allocation cudaMalloc((void **) &first_gpu, nBytes); cudaMalloc((void **) &second_gpu, nBytes); cudaMalloc((void **) &result_gpu, nBytes); //Work definition//////////////////// dim3 dimBlock(block_size, 1, 1); dim3 dimGrid(block_no, 1, 1); ///////////////////////////////////// printf("Copying to device..\n"); cudaMemcpy(first_gpu, first, nBytes, cudaMemcpyHostToDevice); cudaMemcpy(second_gpu, second, nBytes, cudaMemcpyHostToDevice); clock_t start_d=clock(); printf("Doing GPU Matrix Multiplication\n"); matrixMultiplicationKernel<<<block_no,block_size>>>(first_gpu, second_gpu, result_gpu, N*N); //cudaCheckError(); clock_t end_d = clock(); //Wait for kernel call to finish cudaThreadSynchronize(); //Copying data back to host, this is a blocking call and will not start until all kernels are finished cudaMemcpy(result, result_gpu, nBytes, cudaMemcpyDeviceToHost); double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC; printf("Time it took on GPU: %f", time_d); //Free GPU memory cudaFree(first_gpu); cudaFree(second_gpu); cudaFree(result_gpu); return 0; }
60311197d8809f257143486912f02e5ca4a1ce87.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file 3D Optical flow using NVIDIA CUDA * @author Institute for Photon Science and Synchrotron Radiation, Karlsruhe Institute of Technology * * @date 2015-2018 * @version 0.5.0 * * * @section LICENSE * * This program is copyrighted by the author and Institute for Photon Science and Synchrotron Radiation, * Karlsruhe Institute of Technology, Karlsruhe, Germany; * * */ #include <device_launch_parameters.h> #define __HIPCC__ #include <hip/device_functions.h> #include <math_functions.h> #include "src/data_types/data_structs.h" #define IND(X, Y, Z) (((Z) * container_size.height + (Y)) * (container_size.pitch / sizeof(float)) + (X)) #define SIND(X, Y, Z) ((((Z) + radius_2) * shared_block_size.y + ((Y) + radius_2)) * shared_block_size.x + ((X) + radius_2)) __constant__ DataSize4 container_size; extern __shared__ float shared[];\ __device__ void sort(float* buffer, size_t length) { for (int i = 0; i < length - 1; i++) { for (int k = 0; k < length - i - 1; k++) { if (buffer[k] > buffer[k + 1]) { float a = buffer[k]; buffer[k] = buffer[k + 1]; buffer[k + 1] = a; } } } } /* See a note about the thread block size in cuda_operation_median.cpp file.*/ extern "C" __global__ void median_3d( const float* input, size_t width, size_t height, size_t depth, size_t radius, float* output) { int radius_2 = radius / 2; dim3 shared_block_size( blockDim.x + 2 * radius_2, blockDim.y + 2 * radius_2, blockDim.z + 2 * radius_2); dim3 global_id( blockDim.x * blockIdx.x + threadIdx.x, blockDim.y * blockIdx.y + threadIdx.y, blockDim.z * blockIdx.z + threadIdx.z); /* Load data to the shared memoty */ size_t global_x = global_id.x < width ? global_id.x : 2 * width - global_id.x - 2; size_t global_y = global_id.y < height ? global_id.y : 2 * height - global_id.y - 2; size_t global_z = global_id.z < depth ? global_id.z : 2 * depth - global_id.z - 2; /* Main area */ shared[SIND(threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y, global_z)]; /* Left slice */ if (threadIdx.x < radius_2) { int offset = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; size_t global_x_l = offset >= 0 ? offset : -offset; shared[SIND(-radius_2 + threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x_l, global_y, global_z)]; } /* Right slice */ if (threadIdx.x > blockDim.x - 1 - radius_2) { int index = blockDim.x - threadIdx.x; int offset = blockDim.x *(blockIdx.x + 1) + radius_2 - index; size_t global_x_r = offset < width ? offset : 2 * width - offset - 2; shared[SIND(radius_2 + threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x_r, global_y, global_z)]; } /* Upper slice */ if (threadIdx.y < radius_2) { int offset = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; size_t global_y_u = offset >= 0 ? offset : -offset; shared[SIND(threadIdx.x, -radius_2 + threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y_u, global_z)]; } /* Bottom slice */ if (threadIdx.y > blockDim.y - 1 - radius_2) { int index = blockDim.y - threadIdx.y; int offset = blockDim.y *(blockIdx.y + 1) + radius_2 - index; size_t global_y_b = offset < height ? offset : 2 * height - offset - 2; shared[SIND(threadIdx.x, radius_2 + threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y_b, global_z)]; } /* Front slice */ if (threadIdx.z < radius_2) { int offset = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; size_t global_z_f = offset >= 0 ? 
offset : -offset; shared[SIND(threadIdx.x, threadIdx.y, -radius_2 + threadIdx.z)] = input[IND(global_x, global_y, global_z_f)]; } /* Rear slice */ if (threadIdx.z > blockDim.z - 1 - radius_2) { int index = blockDim.z - threadIdx.z; int offset = blockDim.z *(blockIdx.z + 1) + radius_2 - index; size_t global_z_r= offset < depth ? offset : 2 * depth - offset - 2; shared[SIND(threadIdx.x, threadIdx.y, radius_2 + threadIdx.z)] = input[IND(global_x, global_y, global_z_r)]; } /* 12 edges */ { int global_x_e; int global_y_e; int global_z_e; /* 4 along X */ if (threadIdx.y < radius_2 && threadIdx.z < radius_2) { /* Front upper */ global_y_e = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_e = global_y_e > 0 ? global_y_e : -global_y_e; global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x, threadIdx.y - radius_2, threadIdx.z - radius_2)] = input[IND(global_x, global_y_e, global_z_e)]; /* Rear upper */ global_z_e = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_e = global_z_e < depth ? global_z_e : 2 * depth - global_z_e - 2; shared[SIND(threadIdx.x, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x, global_y_e, global_z_e)]; /* Rear bottom */ global_y_e = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_e = global_y_e < height ? global_y_e : 2 * height - global_y_e - 2; shared[SIND(threadIdx.x, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x, global_y_e, global_z_e)]; /* Front bottom */ global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x, global_y_e, global_z_e)]; } /* 4 along Y */ if (threadIdx.x < radius_2 && threadIdx.z < radius_2) { /* Front left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x - radius_2, threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_e, global_y, global_z_e)]; /* Rear left */ global_z_e = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_e = global_z_e < depth ? global_z_e : 2 * depth - global_z_e - 2; shared[SIND(threadIdx.x - radius_2, threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_e, global_y, global_z_e)]; /* Rear right */ global_x_e = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_e = global_x_e < width ? global_x_e : 2 * width - global_x_e - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_e, global_y, global_z_e)]; /* Front right */ global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_e, global_y, global_z_e)]; } /* 4 along Z */ if (threadIdx.x < radius_2 && threadIdx.y < radius_2) { /* Upper left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; global_y_e = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_e = global_y_e > 0 ? 
global_y_e : -global_y_e; shared[SIND(threadIdx.x - radius_2, threadIdx.y - radius_2, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Upper riight */ global_x_e = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_e = global_x_e < width ? global_x_e : 2 * width - global_x_e - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Bottom right */ global_y_e = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_e = global_y_e < height ? global_y_e : 2 * height - global_y_e - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Bottom left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; } } /* 8 corners */ { int global_x_c; int global_y_c; int global_z_c; if (threadIdx.x < radius_2 && threadIdx.y < radius_2 && threadIdx.z < radius_2) { /* Front upper left */ global_x_c = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_c = global_x_c > 0 ? global_x_c : -global_x_c; global_y_c = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_c = global_y_c > 0 ? global_y_c : -global_y_c; global_z_c = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_c = global_z_c > 0 ? global_z_c : -global_z_c; shared[SIND(threadIdx.x - radius_2,threadIdx.y - radius_2,threadIdx.z -radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front upper right */ global_x_c = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_c = global_x_c < width ? global_x_c : 2 * width - global_x_c - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front bottom right */ global_y_c = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_c = global_y_c < height ? global_y_c : 2 * height - global_y_c - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front bottom left */ global_x_c = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_c = global_x_c > 0 ? global_x_c : -global_x_c; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear bottom left */ global_z_c = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_c = global_z_c < depth ? global_z_c : 2 * depth - global_z_c - 2; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear upper left */ global_y_c = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_c = global_y_c > 0 ? global_y_c : -global_y_c; shared[SIND(threadIdx.x - radius_2, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear upper right */ global_x_c = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_c = global_x_c < width ? global_x_c : 2 * width - global_x_c - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear bottom right */ global_y_c = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_c = global_y_c < height ? 
global_y_c : 2 * height - global_y_c - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; } } __syncthreads(); if (global_id.x < width && global_id.y < height && global_id.z < depth) { float buffer[343]; /* Max supported radius is 7, we have to store 7*7*7 values. */ for (size_t iz = 0; iz < radius; ++iz) { for (size_t iy = 0; iy < radius; ++iy) { for (size_t ix = 0; ix < radius; ++ix) { size_t lx = threadIdx.x - ix + radius_2; size_t ly = threadIdx.y - iy + radius_2; size_t lz = threadIdx.z - iz + radius_2; buffer[(iz * radius + iy) * radius + ix] = shared[SIND(lx, ly, lz)]; } } } size_t length = radius * radius * radius; sort(buffer, length); output[IND(global_id.x, global_id.y, global_id.z)] = buffer[length / 2]; } }
60311197d8809f257143486912f02e5ca4a1ce87.cu
/** * @file 3D Optical flow using NVIDIA CUDA * @author Institute for Photon Science and Synchrotron Radiation, Karlsruhe Institute of Technology * * @date 2015-2018 * @version 0.5.0 * * * @section LICENSE * * This program is copyrighted by the author and Institute for Photon Science and Synchrotron Radiation, * Karlsruhe Institute of Technology, Karlsruhe, Germany; * * */ #include <device_launch_parameters.h> #define __CUDACC__ #include <device_functions.h> #include <math_functions.h> #include "src/data_types/data_structs.h" #define IND(X, Y, Z) (((Z) * container_size.height + (Y)) * (container_size.pitch / sizeof(float)) + (X)) #define SIND(X, Y, Z) ((((Z) + radius_2) * shared_block_size.y + ((Y) + radius_2)) * shared_block_size.x + ((X) + radius_2)) __constant__ DataSize4 container_size; extern __shared__ float shared[];\ __device__ void sort(float* buffer, size_t length) { for (int i = 0; i < length - 1; i++) { for (int k = 0; k < length - i - 1; k++) { if (buffer[k] > buffer[k + 1]) { float a = buffer[k]; buffer[k] = buffer[k + 1]; buffer[k + 1] = a; } } } } /* See a note about the thread block size in cuda_operation_median.cpp file.*/ extern "C" __global__ void median_3d( const float* input, size_t width, size_t height, size_t depth, size_t radius, float* output) { int radius_2 = radius / 2; dim3 shared_block_size( blockDim.x + 2 * radius_2, blockDim.y + 2 * radius_2, blockDim.z + 2 * radius_2); dim3 global_id( blockDim.x * blockIdx.x + threadIdx.x, blockDim.y * blockIdx.y + threadIdx.y, blockDim.z * blockIdx.z + threadIdx.z); /* Load data to the shared memoty */ size_t global_x = global_id.x < width ? global_id.x : 2 * width - global_id.x - 2; size_t global_y = global_id.y < height ? global_id.y : 2 * height - global_id.y - 2; size_t global_z = global_id.z < depth ? global_id.z : 2 * depth - global_id.z - 2; /* Main area */ shared[SIND(threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y, global_z)]; /* Left slice */ if (threadIdx.x < radius_2) { int offset = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; size_t global_x_l = offset >= 0 ? offset : -offset; shared[SIND(-radius_2 + threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x_l, global_y, global_z)]; } /* Right slice */ if (threadIdx.x > blockDim.x - 1 - radius_2) { int index = blockDim.x - threadIdx.x; int offset = blockDim.x *(blockIdx.x + 1) + radius_2 - index; size_t global_x_r = offset < width ? offset : 2 * width - offset - 2; shared[SIND(radius_2 + threadIdx.x, threadIdx.y, threadIdx.z)] = input[IND(global_x_r, global_y, global_z)]; } /* Upper slice */ if (threadIdx.y < radius_2) { int offset = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; size_t global_y_u = offset >= 0 ? offset : -offset; shared[SIND(threadIdx.x, -radius_2 + threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y_u, global_z)]; } /* Bottom slice */ if (threadIdx.y > blockDim.y - 1 - radius_2) { int index = blockDim.y - threadIdx.y; int offset = blockDim.y *(blockIdx.y + 1) + radius_2 - index; size_t global_y_b = offset < height ? offset : 2 * height - offset - 2; shared[SIND(threadIdx.x, radius_2 + threadIdx.y, threadIdx.z)] = input[IND(global_x, global_y_b, global_z)]; } /* Front slice */ if (threadIdx.z < radius_2) { int offset = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; size_t global_z_f = offset >= 0 ? 
offset : -offset; shared[SIND(threadIdx.x, threadIdx.y, -radius_2 + threadIdx.z)] = input[IND(global_x, global_y, global_z_f)]; } /* Rear slice */ if (threadIdx.z > blockDim.z - 1 - radius_2) { int index = blockDim.z - threadIdx.z; int offset = blockDim.z *(blockIdx.z + 1) + radius_2 - index; size_t global_z_r= offset < depth ? offset : 2 * depth - offset - 2; shared[SIND(threadIdx.x, threadIdx.y, radius_2 + threadIdx.z)] = input[IND(global_x, global_y, global_z_r)]; } /* 12 edges */ { int global_x_e; int global_y_e; int global_z_e; /* 4 along X */ if (threadIdx.y < radius_2 && threadIdx.z < radius_2) { /* Front upper */ global_y_e = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_e = global_y_e > 0 ? global_y_e : -global_y_e; global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x, threadIdx.y - radius_2, threadIdx.z - radius_2)] = input[IND(global_x, global_y_e, global_z_e)]; /* Rear upper */ global_z_e = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_e = global_z_e < depth ? global_z_e : 2 * depth - global_z_e - 2; shared[SIND(threadIdx.x, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x, global_y_e, global_z_e)]; /* Rear bottom */ global_y_e = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_e = global_y_e < height ? global_y_e : 2 * height - global_y_e - 2; shared[SIND(threadIdx.x, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x, global_y_e, global_z_e)]; /* Front bottom */ global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x, global_y_e, global_z_e)]; } /* 4 along Y */ if (threadIdx.x < radius_2 && threadIdx.z < radius_2) { /* Front left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(threadIdx.x - radius_2, threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_e, global_y, global_z_e)]; /* Rear left */ global_z_e = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_e = global_z_e < depth ? global_z_e : 2 * depth - global_z_e - 2; shared[SIND(threadIdx.x - radius_2, threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_e, global_y, global_z_e)]; /* Rear right */ global_x_e = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_e = global_x_e < width ? global_x_e : 2 * width - global_x_e - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_e, global_y, global_z_e)]; /* Front right */ global_z_e = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_e = global_z_e > 0 ? global_z_e : -global_z_e; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_e, global_y, global_z_e)]; } /* 4 along Z */ if (threadIdx.x < radius_2 && threadIdx.y < radius_2) { /* Upper left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; global_y_e = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_e = global_y_e > 0 ? 
global_y_e : -global_y_e; shared[SIND(threadIdx.x - radius_2, threadIdx.y - radius_2, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Upper riight */ global_x_e = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_e = global_x_e < width ? global_x_e : 2 * width - global_x_e - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Bottom right */ global_y_e = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_e = global_y_e < height ? global_y_e : 2 * height - global_y_e - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; /* Bottom left */ global_x_e = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_e = global_x_e > 0 ? global_x_e : -global_x_e; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, threadIdx.z)] = input[IND(global_x_e, global_y_e, global_z)]; } } /* 8 corners */ { int global_x_c; int global_y_c; int global_z_c; if (threadIdx.x < radius_2 && threadIdx.y < radius_2 && threadIdx.z < radius_2) { /* Front upper left */ global_x_c = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_c = global_x_c > 0 ? global_x_c : -global_x_c; global_y_c = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_c = global_y_c > 0 ? global_y_c : -global_y_c; global_z_c = blockDim.z * blockIdx.z - radius_2 + threadIdx.z; global_z_c = global_z_c > 0 ? global_z_c : -global_z_c; shared[SIND(threadIdx.x - radius_2,threadIdx.y - radius_2,threadIdx.z -radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front upper right */ global_x_c = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_c = global_x_c < width ? global_x_c : 2 * width - global_x_c - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front bottom right */ global_y_c = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_c = global_y_c < height ? global_y_c : 2 * height - global_y_c - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Front bottom left */ global_x_c = blockDim.x * blockIdx.x - radius_2 + threadIdx.x; global_x_c = global_x_c > 0 ? global_x_c : -global_x_c; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, threadIdx.z - radius_2)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear bottom left */ global_z_c = blockDim.z *(blockIdx.z + 1) + threadIdx.z; global_z_c = global_z_c < depth ? global_z_c : 2 * depth - global_z_c - 2; shared[SIND(threadIdx.x - radius_2, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear upper left */ global_y_c = blockDim.y * blockIdx.y - radius_2 + threadIdx.y; global_y_c = global_y_c > 0 ? global_y_c : -global_y_c; shared[SIND(threadIdx.x - radius_2, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear upper right */ global_x_c = blockDim.x *(blockIdx.x + 1) + threadIdx.x; global_x_c = global_x_c < width ? global_x_c : 2 * width - global_x_c - 2; shared[SIND(blockDim.x + threadIdx.x, threadIdx.y - radius_2, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; /* Rear bottom right */ global_y_c = blockDim.y *(blockIdx.y + 1) + threadIdx.y; global_y_c = global_y_c < height ? 
global_y_c : 2 * height - global_y_c - 2; shared[SIND(blockDim.x + threadIdx.x, blockDim.y + threadIdx.y, blockDim.z + threadIdx.z)] = input[IND(global_x_c, global_y_c, global_z_c)]; } } __syncthreads(); if (global_id.x < width && global_id.y < height && global_id.z < depth) { float buffer[343]; /* Max supported radius is 7, we have to store 7*7*7 values. */ for (size_t iz = 0; iz < radius; ++iz) { for (size_t iy = 0; iy < radius; ++iy) { for (size_t ix = 0; ix < radius; ++ix) { size_t lx = threadIdx.x - ix + radius_2; size_t ly = threadIdx.y - iy + radius_2; size_t lz = threadIdx.z - iz + radius_2; buffer[(iz * radius + iy) * radius + ix] = shared[SIND(lx, ly, lz)]; } } } size_t length = radius * radius * radius; sort(buffer, length); output[IND(global_id.x, global_id.y, global_id.z)] = buffer[length / 2]; } }
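// Sketch (not from the file pair above): every halo load in the median kernel repeats the
// same mirror-boundary mapping, indices below 0 are reflected as -p and indices past the
// end as 2*n - p - 2 (reflection without repeating the border sample). A minimal helper
// expressing that mapping; `reflect_index` is an illustrative name and assumes p stays
// within one reflection of the valid range, as it does in the kernel above.
__device__ __forceinline__ int reflect_index(int p, int n)
{
    if (p < 0)  return -p;              // mirror across 0:      -1 -> 1, -2 -> 2, ...
    if (p >= n) return 2 * n - p - 2;   // mirror across n - 1:   n -> n - 2, n + 1 -> n - 3, ...
    return p;
}
// e.g. global_x_l = reflect_index(blockDim.x * blockIdx.x - radius_2 + threadIdx.x, width);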
d40c72d3349a3cd668d790cdc0776b438b3d4a0b.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hip/driver_types.h> #include <gtest/gtest.h> #include <assert.h> #include <tests/def.h> #include "include/linked_list.h" #include "test_linked_list.h" #ifdef STATISTIC extern int amount; extern FILE * log_file; #else #define amount 500000 #endif class TestLinkedListDeviceOpt : public testing::Test{ public: int **values; int **values_device; int **values_arr_device; LinkedListItem ** item_address_on_device; LinkedListItem ** items; int *sizes; int *sizes_device; size_t usage; int count; void SetUp() override; void TearDown() override; }; void TestLinkedListDeviceOpt::SetUp(){ usage = 0; count = 0; hipError_t err; err = hipHostMalloc((void**)&values, sizeof(int*)*amount); // alloc values assert(err == hipSuccess); err = hipHostMalloc((void**)&sizes, sizeof(int*)*amount); // alloc sizes assert(err == hipSuccess); err = hipMalloc((void**)&sizes_device, sizeof(int)*amount);// alloc sizes_device assert(err == hipSuccess); err = hipHostMalloc((void**)&item_address_on_device, sizeof(LinkedListItem*)*amount); // init address_on_device; assert(err == hipSuccess); err = hipMalloc((void**)&items, sizeof(LinkedListItem*)*amount); // alloc items assert(err == hipSuccess); err = hipMalloc((void**)&values_device, sizeof(int*)*amount); // alloc values_device assert(err == hipSuccess); err = hipHostMalloc((void**)&values_arr_device, sizeof(int*)*amount); // alloc values_arr_device on host assert(err == hipSuccess); LinkedListItem *tmp; for(int i = 0; i < amount; ++i){ sizes[i] = rand() % 100+50; err = hipMalloc((void**)&tmp, sizeof(LinkedListItem)*sizes[i]); // alloc instance assert(err == hipSuccess); item_address_on_device[i] = tmp; err = hipHostMalloc((void**)&values[i], sizeof(int)*sizes[i]); // alloc values assert(err == hipSuccess); for(int j = 0; j < sizes[i]; ++j){ values[i][j] = rand() % 1024; } count += sizes[i]; } err = hipMemcpy(items, item_address_on_device, sizeof(LinkedListItem*)*amount, hipMemcpyHostToDevice); // cpy items ptr value assert(err == hipSuccess); int *values_tmp; for(int i = 0; i < amount; ++i){ err = hipMalloc((void**)&values_tmp, sizeof(int)*sizes[i]); assert(err == hipSuccess); err = hipMemcpy(values_tmp, values[i], sizeof(int)*sizes[i], hipMemcpyHostToDevice); assert(err == hipSuccess); values_arr_device[i] = values_tmp; } err = hipMemcpy(values_device, values_arr_device, sizeof(int*)*amount, hipMemcpyHostToDevice); // cpy values ptr value assert(err == hipSuccess); err = hipMemcpy(sizes_device, sizes, sizeof(int)*amount, hipMemcpyHostToDevice); // cpy sizes value assert(err == hipSuccess); } void TestLinkedListDeviceOpt::TearDown(){ // free item for(int i = 0 ; i < amount; ++i){ hipFree(item_address_on_device[i]); hipFree(values_arr_device[i]); hipHostFree(values[i]); } hipHostFree(sizes); hipFree(sizes_device); hipHostFree(values); hipFree(values_device); hipHostFree(values_arr_device); hipHostFree(item_address_on_device); hipFree(items); } __global__ void initLinkedListOpsKernel(LinkedListElementOperation * op){ op->setNext = __listEleSetNext; op->setPrev = __listEleSetPrev; op->init = initList; } __global__ void sortingSetupKernel(LinkedListItem **items, int **values, int *sizes, unsigned int offset, int am){ int idx = threadIdx.x + blockIdx.x * blockDim.x + offset; if(idx < am){ LinkedListElementOperation ops = LINKED_LIST_OPS(); for(int i = 0; i < sizes[idx]; ++i){ items[idx][i].value = values[idx][i]; 
initList(&items[idx][i].ele); items[idx][i].ele.getValue = linkedListItemGetValue; items[idx][i].ele.ptr_derived_object = &items[idx][i]; } for(int i = 0; i < sizes[idx] - 1; ++i){ ops.setNext(&items[idx][i].ele, &items[idx][i+1].ele); } } } __global__ void sorting(LinkedListItem **items, int **values, LinkedListElementOperation *ops,unsigned int offset, int am){ int idx = threadIdx.x + blockIdx.x * blockDim.x + offset; if(idx < am){ LinkedListElement *iter; iter = linkedListMergeSort(&items[idx][0].ele, ops); for(int i = 0; iter; ++i){ values[idx][i] = iter->getValue(iter); iter = iter->next; } } } TEST_F(TestLinkedListDeviceOpt, test_sort_linked_list_on_device_opt){ int ** result_arr; int **result_arr_device; int *result_tmp; hipHostMalloc((void**)&result_arr, sizeof(int*)*amount); for(int i = 0; i < amount; ++i){ ASSERT_EQ(hipMalloc((void**)&result_tmp, sizeof(int)*sizes[i]), hipSuccess); usage += sizeof(int)*sizes[i]; result_arr[i] = result_tmp; } ASSERT_EQ(hipMalloc((void**)&result_arr_device, sizeof(int*)*amount), hipSuccess); ASSERT_EQ(hipMemcpy(result_arr_device, result_arr, sizeof(int*)*amount, hipMemcpyHostToDevice), hipSuccess); usage += sizeof(int*)*amount; /********INIT OPS**********************************/ LinkedListElementOperation *ops_device; ASSERT_EQ(hipMalloc((void**)&ops_device, sizeof(LinkedListElementOperation)), hipSuccess); hipLaunchKernelGGL(( initLinkedListOpsKernel), dim3(1), dim3(1), 0, 0, ops_device); clock_t t1 = clock(); hipLaunchKernelGGL(( sortingSetupKernel), dim3(1024), dim3(1024), 0, 0, items, values_device, sizes_device, 0, amount); // sortingSetupKernel<<<1024, 1024>>>(items, values_device, sizes_device, 100000, amount); hipLaunchKernelGGL(( sorting), dim3(1024), dim3(1024), 0, 0, items, result_arr_device, ops_device, 0, amount); // sorting<<<1024, 1024>>>(items, result_arr_device, ops_device, 100000, amount); hipDeviceSynchronize(); clock_t t2 = clock(); PRINTF("Testing amount : %d\n", count); PRINTF("Device memory usage : %lu bytes\n", usage); PRINTF("Time elapse : %.3fs\n", (t2 - t1) / (double)CLOCKS_PER_SEC); #ifdef STATISTIC fprintf(log_file, "%.3f\n", (t2 - t1) / (double)CLOCKS_PER_SEC); #endif ASSERT_EQ(hipMemcpy(result_arr, result_arr_device, sizeof(int*)*amount, hipMemcpyDeviceToHost), hipSuccess); for(int i = 0; i < amount; ++i){ hipHostMalloc((void**)&result_tmp, sizeof(int)*sizes[i]); ASSERT_EQ(hipMemcpy(result_tmp, result_arr[i], sizeof(int)*sizes[i], hipMemcpyDeviceToHost), hipSuccess); qsort(values[i], sizes[i], sizeof(int), cmpint); for(int j = 0; j < sizes[i]; ++j){ ASSERT_EQ(values[i][j], result_tmp[j]); } hipHostFree(result_tmp); hipFree(result_arr[i]); } hipHostFree(result_arr); hipFree(result_arr_device); }
d40c72d3349a3cd668d790cdc0776b438b3d4a0b.cu
#include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <driver_types.h> #include <gtest/gtest.h> #include <assert.h> #include <tests/def.h> #include "include/linked_list.h" #include "test_linked_list.h" #ifdef STATISTIC extern int amount; extern FILE * log_file; #else #define amount 500000 #endif class TestLinkedListDeviceOpt : public testing::Test{ public: int **values; int **values_device; int **values_arr_device; LinkedListItem ** item_address_on_device; LinkedListItem ** items; int *sizes; int *sizes_device; size_t usage; int count; void SetUp() override; void TearDown() override; }; void TestLinkedListDeviceOpt::SetUp(){ usage = 0; count = 0; cudaError_t err; err = cudaMallocHost((void**)&values, sizeof(int*)*amount); // alloc values assert(err == cudaSuccess); err = cudaMallocHost((void**)&sizes, sizeof(int*)*amount); // alloc sizes assert(err == cudaSuccess); err = cudaMalloc((void**)&sizes_device, sizeof(int)*amount);// alloc sizes_device assert(err == cudaSuccess); err = cudaMallocHost((void**)&item_address_on_device, sizeof(LinkedListItem*)*amount); // init address_on_device; assert(err == cudaSuccess); err = cudaMalloc((void**)&items, sizeof(LinkedListItem*)*amount); // alloc items assert(err == cudaSuccess); err = cudaMalloc((void**)&values_device, sizeof(int*)*amount); // alloc values_device assert(err == cudaSuccess); err = cudaMallocHost((void**)&values_arr_device, sizeof(int*)*amount); // alloc values_arr_device on host assert(err == cudaSuccess); LinkedListItem *tmp; for(int i = 0; i < amount; ++i){ sizes[i] = rand() % 100+50; err = cudaMalloc((void**)&tmp, sizeof(LinkedListItem)*sizes[i]); // alloc instance assert(err == cudaSuccess); item_address_on_device[i] = tmp; err = cudaMallocHost((void**)&values[i], sizeof(int)*sizes[i]); // alloc values assert(err == cudaSuccess); for(int j = 0; j < sizes[i]; ++j){ values[i][j] = rand() % 1024; } count += sizes[i]; } err = cudaMemcpy(items, item_address_on_device, sizeof(LinkedListItem*)*amount, cudaMemcpyHostToDevice); // cpy items ptr value assert(err == cudaSuccess); int *values_tmp; for(int i = 0; i < amount; ++i){ err = cudaMalloc((void**)&values_tmp, sizeof(int)*sizes[i]); assert(err == cudaSuccess); err = cudaMemcpy(values_tmp, values[i], sizeof(int)*sizes[i], cudaMemcpyHostToDevice); assert(err == cudaSuccess); values_arr_device[i] = values_tmp; } err = cudaMemcpy(values_device, values_arr_device, sizeof(int*)*amount, cudaMemcpyHostToDevice); // cpy values ptr value assert(err == cudaSuccess); err = cudaMemcpy(sizes_device, sizes, sizeof(int)*amount, cudaMemcpyHostToDevice); // cpy sizes value assert(err == cudaSuccess); } void TestLinkedListDeviceOpt::TearDown(){ // free item for(int i = 0 ; i < amount; ++i){ cudaFree(item_address_on_device[i]); cudaFree(values_arr_device[i]); cudaFreeHost(values[i]); } cudaFreeHost(sizes); cudaFree(sizes_device); cudaFreeHost(values); cudaFree(values_device); cudaFreeHost(values_arr_device); cudaFreeHost(item_address_on_device); cudaFree(items); } __global__ void initLinkedListOpsKernel(LinkedListElementOperation * op){ op->setNext = __listEleSetNext; op->setPrev = __listEleSetPrev; op->init = initList; } __global__ void sortingSetupKernel(LinkedListItem **items, int **values, int *sizes, unsigned int offset, int am){ int idx = threadIdx.x + blockIdx.x * blockDim.x + offset; if(idx < am){ LinkedListElementOperation ops = LINKED_LIST_OPS(); for(int i = 0; i < sizes[idx]; ++i){ items[idx][i].value = values[idx][i]; initList(&items[idx][i].ele); 
items[idx][i].ele.getValue = linkedListItemGetValue; items[idx][i].ele.ptr_derived_object = &items[idx][i]; } for(int i = 0; i < sizes[idx] - 1; ++i){ ops.setNext(&items[idx][i].ele, &items[idx][i+1].ele); } } } __global__ void sorting(LinkedListItem **items, int **values, LinkedListElementOperation *ops,unsigned int offset, int am){ int idx = threadIdx.x + blockIdx.x * blockDim.x + offset; if(idx < am){ LinkedListElement *iter; iter = linkedListMergeSort(&items[idx][0].ele, ops); for(int i = 0; iter; ++i){ values[idx][i] = iter->getValue(iter); iter = iter->next; } } } TEST_F(TestLinkedListDeviceOpt, test_sort_linked_list_on_device_opt){ int ** result_arr; int **result_arr_device; int *result_tmp; cudaMallocHost((void**)&result_arr, sizeof(int*)*amount); for(int i = 0; i < amount; ++i){ ASSERT_EQ(cudaMalloc((void**)&result_tmp, sizeof(int)*sizes[i]), cudaSuccess); usage += sizeof(int)*sizes[i]; result_arr[i] = result_tmp; } ASSERT_EQ(cudaMalloc((void**)&result_arr_device, sizeof(int*)*amount), cudaSuccess); ASSERT_EQ(cudaMemcpy(result_arr_device, result_arr, sizeof(int*)*amount, cudaMemcpyHostToDevice), cudaSuccess); usage += sizeof(int*)*amount; /********INIT OPS**********************************/ LinkedListElementOperation *ops_device; ASSERT_EQ(cudaMalloc((void**)&ops_device, sizeof(LinkedListElementOperation)), cudaSuccess); initLinkedListOpsKernel<<<1, 1>>>(ops_device); clock_t t1 = clock(); sortingSetupKernel<<<1024, 1024>>>(items, values_device, sizes_device, 0, amount); // sortingSetupKernel<<<1024, 1024>>>(items, values_device, sizes_device, 100000, amount); sorting<<<1024, 1024>>>(items, result_arr_device, ops_device, 0, amount); // sorting<<<1024, 1024>>>(items, result_arr_device, ops_device, 100000, amount); cudaDeviceSynchronize(); clock_t t2 = clock(); PRINTF("Testing amount : %d\n", count); PRINTF("Device memory usage : %lu bytes\n", usage); PRINTF("Time elapse : %.3fs\n", (t2 - t1) / (double)CLOCKS_PER_SEC); #ifdef STATISTIC fprintf(log_file, "%.3f\n", (t2 - t1) / (double)CLOCKS_PER_SEC); #endif ASSERT_EQ(cudaMemcpy(result_arr, result_arr_device, sizeof(int*)*amount, cudaMemcpyDeviceToHost), cudaSuccess); for(int i = 0; i < amount; ++i){ cudaMallocHost((void**)&result_tmp, sizeof(int)*sizes[i]); ASSERT_EQ(cudaMemcpy(result_tmp, result_arr[i], sizeof(int)*sizes[i], cudaMemcpyDeviceToHost), cudaSuccess); qsort(values[i], sizes[i], sizeof(int), cmpint); for(int j = 0; j < sizes[i]; ++j){ ASSERT_EQ(values[i][j], result_tmp[j]); } cudaFreeHost(result_tmp); cudaFree(result_arr[i]); } cudaFreeHost(result_arr); cudaFree(result_arr_device); }
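// Sketch (not from the file pair above): the test hard-codes <<<1024, 1024>>> and relies on
// the `idx < am` guard inside the kernels; deriving the grid from the element count keeps
// the same guard correct for any `amount`. Minimal illustration; `amount`, `items`,
// `values_device` and `sizes_device` refer to the fixture above, and 256 threads per block
// is just an illustrative choice.
const int threadsPerBlock = 256;
const int numBlocks = (amount + threadsPerBlock - 1) / threadsPerBlock;   // ceil(amount / threadsPerBlock)
sortingSetupKernel<<<numBlocks, threadsPerBlock>>>(items, values_device, sizes_device, 0, amount);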
e12dfd5fe9ab3640dd7f24d058c8b8d3a3cf534c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "XSbench_header.cuh" //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // port of the original CPU OpenMP code to CUDA with few significant changes or // optimizations made. Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. Optimized variants // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype) { //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Running baseline event-based simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( xs_lookup_kernel_baseline), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } // In this kernel, we perform a single lookup with each thread. Threads within a warp // do not really have any relation to each other, and divergence due to high nuclide count fuel // material lookups are costly. This kernel constitutes baseline performance. __global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } // Calculates the microscopic cross section for a given nuclide & energy __device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + idx - 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. 
{ // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorbtion XS xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); } // Calculates macroscopic cross section based on a given material & energy __device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * __restrict__ num_nucs, double * __restrict__ concs, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, int * __restrict__ mats, double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolatied (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array. 
// (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simulataneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } } // binary search for energy on unionized energy grid // returns lower index __device__ long grid_search( long n, double quarry, double * __restrict__ A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid __host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution __device__ int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless. // Also could be argued that doing fractions by weight would be // a better approximation, but volume does a good enough job for now. 
double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } __host__ __device__ double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; } __device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // OPTIMIZED VARIANT FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // This section contains a number of optimized variants of some of the above // functions, which each deploy a different combination of optimizations strategies // specific to GPU. By default, XSBench will not run any of these variants. They // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Basic kernel splitting of sampling & lookup routines //////////////////////////////////////////////////////////////////////////////////// // This optimization requires a little extra data to store all material IDs and // energies for the sampled particles between kernel calls. By itself, this // optimization is likely actually a bit of a slowdown compared to the baseline // kernel. However, it will be used by better optimization kernels down the line. 
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_1), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void sampling_kernel(Inputs in, SimulationData GSD ) { // The lookup ID. const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Store sample data in state array GSD.p_energy_samples[i] = p_energy; GSD.mat_samples[i] = mat; } __global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels //////////////////////////////////////////////////////////////////////////////////// // This one builds on the first optimization. It uses multiple kernels, one // for each material type, to better balance the workload across threads within // a warp. This works because each material will have a different number of // isotopes, with some having a ton, meaning that SIMD efficiency can be rather // low by default. Better efficiency may be gained in further optimizations by // sorting the lookups first. 
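//
// Illustrative note (not part of the XSBench source): the host side below launches the
// same grid once per material,
//
//     for( int m = 0; m < 12; m++ )
//         hipLaunchKernelGGL(( xs_lookup_kernel_optimization_2), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m );
//
// and any thread whose sampled material does not equal m returns immediately, so each
// launch only performs lookups for a single material.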
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 2 - Material Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Launch all material kernels individually for( int m = 0; m < 12; m++ ) hipLaunchKernelGGL(( xs_lookup_kernel_optimization_2), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups //////////////////////////////////////////////////////////////////////////////////// // This optimization alters Optimization 2. Instead of executing a kernel call for // ALL different material types, only two different calls are made. One for fuel, // and one for all the other materials. As the fuel material has by far the most // isotopes, it takes much longer than the rest. 
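//
// Illustrative note (not part of the XSBench source): only two launches are made below,
// one with is_fuel = 1 and one with is_fuel = 0, and the kernel performs a lookup only
// when
//
//     (is_fuel == 1 && mat == 0)    // fuel pass handles fuel lookups (material 0)
//  || (is_fuel == 0 && mat != 0)    // other pass handles the remaining materials
//
// so every sampled lookup is processed exactly once across the two launches.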
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Launch all material kernels individually hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, 0 ); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_3), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, 1 ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; int mat = GSD.mat_samples[i]; // If this is the fuel kernel, AND this is a fuel lookup, then perform a lookup // OR if this is not the fuel kernel, AND this is not a fuel lookup, then perform the lookup if( ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0 ) )) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 2, adding in a full sort before // hand so that the warps should be densely packed together. This should maximize // SIMD efficiency of the kernel, but may incur an added cost for the sort. 
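//
// Illustrative sketch (not part of the XSBench source) of the host-side bookkeeping used
// below: thrust::count gives the number of lookups per material, thrust::sort_by_key
// groups mat_samples (keys) and p_energy_samples (values) into per-material segments,
// and each material's kernel is then launched over its own contiguous slice:
//
//     offset_m = n_lookups_per_material[0] + ... + n_lookups_per_material[m-1]
//     nblocks  = ceil( n_lookups_per_material[m] / 32.0 )
//
// so thread i of launch m handles global lookup index offset_m + i.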
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort materials thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Launch all material kernels individually int offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition //////////////////////////////////////////////////////////////////////////////////// // This optimization is similar to optimization 4, but instead of sorting // fully by material, we just sort by fuel or not fuel. Similarly, instead of // launching kernels for all materials, similar to optimization 3 we only launch // kernels for the fuel and other mateirals. 
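//
// Illustrative note (not part of the XSBench source): because only a fuel / not-fuel
// split is needed, a single partition replaces the full 12-way sort of optimization 4.
// The two launches below then size their grids from the fuel count,
//
//     nblocks_fuel  = ceil( n_fuel_lookups / 32.0 )                   // segment offset 0
//     nblocks_other = ceil( (in.lookups - n_fuel_lookups) / 32.0 )    // segment offset n_fuel_lookups
//
// so each kernel instance walks one contiguous segment of the sampled data.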
//////////////////////////////////////////////////////////////////////////////////// // Comparator for partitioning stage struct is_mat_fuel{ __host__ __device__ bool operator()(const int & a) { return a == 0; } }; unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_fuel_lookups = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, 0); // Partition fuel into the first part of the array thrust::partition(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); // Launch all material kernels individually (asynchronous is allowed) nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_5), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, n_fuel_lookups, 0 ); nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_5), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) { // The lookup ID. 
Used to set the seed, and to store the verification value int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= n_lookups ) return; i += offset; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort // + Energy Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 4, adding in a second sort by energy. // It is extremely fast, as now most of the threads within a warp will be hitting // the same indices in the lookup grids. This greatly reduces thread divergence and // greatly improves cache efficiency and re-use. // // However, it is unlikely that this exact optimization would be possible in a real // application like OpenMC. One major difference is that particle objects are quite // large, often having 50+ variable fields, such that sorting them in memory becomes // rather expensive. Instead, the best possible option would probably be to create // intermediate indexing (per Hamilton et. al 2019), and run the kernels indirectly. 
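//
// Illustrative note (not part of the XSBench source): after the material sort, each
// material's segment is additionally sorted by particle energy, so neighboring threads
// in a warp tend to hit the same (or adjacent) gridpoints during the energy search,
// which is where the divergence and cache-reuse gains come from. No new lookup kernel
// is introduced; the per-material kernel from optimization 4 is simply re-launched over
// the doubly-sorted segments. The indirect-indexing alternative mentioned above would
// sort a lightweight array of lookup indices instead of the full particle records.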
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( hipMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( hipMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); hipLaunchKernelGGL(( sampling_kernel), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD ); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort by material first thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Now, sort each material by energy int offset = 0; for( int m = 0; m < 12; m++ ) { thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset); offset += n_lookups_per_material[m]; } // Launch all material kernels individually offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); hipLaunchKernelGGL(( xs_lookup_kernel_optimization_4), dim3(nblocks), dim3(nthreads), 0, 0, in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); return verification_scalar; }
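// Illustrative note (not part of the XSBench source): every variant above ends the same
// way -- each thread stores a small per-lookup verification value (1..5) in
// GSD.verification[i], and the host collapses that array with
//
//     thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0);
//
// yielding a single checksum that can be compared across the baseline and optimized kernels.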
e12dfd5fe9ab3640dd7f24d058c8b8d3a3cf534c.cu
#include "XSbench_header.cuh" //////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // port of the original CPU OpenMP code to CUDA with few significant changes or // optimizations made. Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. Optimized variants // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_baseline(Inputs in, SimulationData GSD, int mype) { //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Running baseline event-based simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); xs_lookup_kernel_baseline<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } // In this kernel, we perform a single lookup with each thread. Threads within a warp // do not really have any relation to each other, and divergence due to high nuclide count fuel // material lookups are costly. This kernel constitutes baseline performance. __global__ void xs_lookup_kernel_baseline(Inputs in, SimulationData GSD ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } // Calculates the microscopic cross section for a given nuclide & energy __device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, long idx, double * __restrict__ xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint * low, * high; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index idx = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], 0, n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + idx - 1]; else low = &nuclide_grids[nuc*n_gridpoints + idx]; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. 
{ // pull ptr from energy grid and check to ensure that // we're not reading off the end of the nuclide's grid if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1]; else low = &nuclide_grids[nuc*n_gridpoints + index_data[idx * n_isotopes + nuc]]; } else // Hash grid { // load lower bounding index int u_low = index_data[idx * n_isotopes + nuc]; // Determine higher bounding index int u_high; if( idx == hash_bins - 1 ) u_high = n_gridpoints - 1; else u_high = index_data[(idx+1)*n_isotopes + nuc] + 1; // Check edge cases to make sure energy is actually between these // Then, if things look good, search for gridpoint in the nuclide grid // within the lower and higher limits we've calculated. double e_low = nuclide_grids[nuc*n_gridpoints + u_low].energy; double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy; int lower; if( p_energy <= e_low ) lower = 0; else if( p_energy >= e_high ) lower = n_gridpoints - 1; else lower = grid_search_nuclide( n_gridpoints, p_energy, &nuclide_grids[nuc*n_gridpoints], u_low, u_high); if( lower == n_gridpoints - 1 ) low = &nuclide_grids[nuc*n_gridpoints + lower - 1]; else low = &nuclide_grids[nuc*n_gridpoints + lower]; } high = low + 1; // calculate the re-useable interpolation factor f = (high->energy - p_energy) / (high->energy - low->energy); // Total XS xs_vector[0] = high->total_xs - f * (high->total_xs - low->total_xs); // Elastic XS xs_vector[1] = high->elastic_xs - f * (high->elastic_xs - low->elastic_xs); // Absorbtion XS xs_vector[2] = high->absorbtion_xs - f * (high->absorbtion_xs - low->absorbtion_xs); // Fission XS xs_vector[3] = high->fission_xs - f * (high->fission_xs - low->fission_xs); // Nu Fission XS xs_vector[4] = high->nu_fission_xs - f * (high->nu_fission_xs - low->nu_fission_xs); } // Calculates macroscopic cross section based on a given material & energy __device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, int * __restrict__ num_nucs, double * __restrict__ concs, double * __restrict__ egrid, int * __restrict__ index_data, NuclideGridPoint * __restrict__ nuclide_grids, int * __restrict__ mats, double * __restrict__ macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){ int p_nuc; // the nuclide we are looking up long idx = -1; double conc; // the concentration of the nuclide in the material // cleans out macro_xs_vector for( int k = 0; k < 5; k++ ) macro_xs_vector[k] = 0; // If we are using the unionized energy grid (UEG), we only // need to perform 1 binary search per macroscopic lookup. // If we are using the nuclide grid search, it will have to be // done inside of the "calculate_micro_xs" function for each different // nuclide in the material. if( grid_type == UNIONIZED ) idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid); else if( grid_type == HASH ) { double du = 1.0 / hash_bins; idx = p_energy / du; } // Once we find the pointer array on the UEG, we can pull the data // from the respective nuclide grids, as well as the nuclide // concentration data for the material // Each nuclide from the material needs to have its micro-XS array // looked up & interpolatied (via calculate_micro_xs). Then, the // micro XS is multiplied by the concentration of that nuclide // in the material, and added to the total macro XS array. 
// (Independent -- though if parallelizing, must use atomic operations // or otherwise control access to the xs_vector and macro_xs_vector to // avoid simulataneous writing to the same data structure) for( int j = 0; j < num_nucs[mat]; j++ ) { double xs_vector[5]; p_nuc = mats[mat*max_num_nucs + j]; conc = concs[mat*max_num_nucs + j]; calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins ); for( int k = 0; k < 5; k++ ) macro_xs_vector[k] += xs_vector[k] * conc; } } // binary search for energy on unionized energy grid // returns lower index __device__ long grid_search( long n, double quarry, double * __restrict__ A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // binary search for energy on nuclide energy grid __host__ __device__ long grid_search_nuclide( long n, double quarry, NuclideGridPoint * A, long low, long high) { long lowerLimit = low; long upperLimit = high; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint].energy > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // picks a material based on a probabilistic distribution __device__ int pick_mat( uint64_t * seed ) { // I have a nice spreadsheet supporting these numbers. They are // the fractions (by volume) of material in the core. Not a // *perfect* approximation of where XS lookups are going to occur, // but this will do a good job of biasing the system nonetheless. // Also could be argued that doing fractions by weight would be // a better approximation, but volume does a good enough job for now. 
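//
// Illustrative walk-through (not part of the XSBench source): the loop below compares
// the roll against the running sum dist[1] + ... + dist[i] for i = 1..11, e.g.
//
//     i = 1 : return 1 (cladding)            if roll < 0.052
//     i = 2 : return 2 (cold, borated water) if roll < 0.052 + 0.275 = 0.327
//     ...
//
// and falls through to material 0 (fuel) otherwise, which happens with probability
// 1 - 0.861 = 0.139, close to the nominal 0.140 fuel fraction.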
double dist[12]; dist[0] = 0.140; // fuel dist[1] = 0.052; // cladding dist[2] = 0.275; // cold, borated water dist[3] = 0.134; // hot, borated water dist[4] = 0.154; // RPV dist[5] = 0.064; // Lower, radial reflector dist[6] = 0.066; // Upper reflector / top plate dist[7] = 0.055; // bottom plate dist[8] = 0.008; // bottom nozzle dist[9] = 0.015; // top nozzle dist[10] = 0.025; // top of fuel assemblies dist[11] = 0.013; // bottom of fuel assemblies double roll = LCG_random_double(seed); // makes a pick based on the distro for( int i = 0; i < 12; i++ ) { double running = 0; for( int j = i; j > 0; j-- ) running += dist[j]; if( roll < running ) return i; } return 0; } __host__ __device__ double LCG_random_double(uint64_t * seed) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 const uint64_t a = 2806196910506780709ULL; const uint64_t c = 1ULL; *seed = (a * (*seed) + c) % m; return (double) (*seed) / (double) m; } __device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n) { // LCG parameters const uint64_t m = 9223372036854775808ULL; // 2^63 uint64_t a = 2806196910506780709ULL; uint64_t c = 1ULL; n = n % m; uint64_t a_new = 1; uint64_t c_new = 0; while(n > 0) { if(n & 1) { a_new *= a; c_new = c_new * a + c; } c *= (a + 1); a *= a; n >>= 1; } return (a_new * seed + c_new) % m; } //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // OPTIMIZED VARIANT FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // This section contains a number of optimized variants of some of the above // functions, which each deploy a different combination of optimizations strategies // specific to GPU. By default, XSBench will not run any of these variants. They // must be specifically selected using the "-k <optimized variant ID>" command // line argument. //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////// // Optimization 1 -- Basic kernel splitting of sampling & lookup routines //////////////////////////////////////////////////////////////////////////////////// // This optimization requires a little extra data to store all material IDs and // energies for the sampled particles between kernel calls. By itself, this // optimization is likely actually a bit of a slowdown compared to the baseline // kernel. However, it will be used by better optimization kernels down the line. 
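//
// Illustrative note (not part of the XSBench source): the sampling kernel below gives
// every lookup i its own reproducible random stream by jumping the shared LCG forward,
//
//     uint64_t seed = fast_forward_LCG(STARTING_SEED, 2*i);
//
// two steps per lookup because each lookup draws two samples (one energy, one material),
// so the sampled values are independent of thread scheduling order.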
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_1(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 1 - basic sample/lookup kernel splitting"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); xs_lookup_kernel_optimization_1<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void sampling_kernel(Inputs in, SimulationData GSD ) { // The lookup ID. const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // Store sample data in state array GSD.p_energy_samples[i] = p_energy; GSD.mat_samples[i] = mat; } __global__ void xs_lookup_kernel_optimization_1(Inputs in, SimulationData GSD ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) GSD.mat_samples[i], // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 2 -- Kernel Splitting + Material-Specific Lookup Kernels //////////////////////////////////////////////////////////////////////////////////// // This one builds on the first optimization. It uses multiple kernels, one // for each material type, to better balance the workload across threads within // a warp. This works because each material will have a different number of // isotopes, with some having a ton, meaning that SIMD efficiency can be rather // low by default. Better efficiency may be gained in further optimizations by // sorting the lookups first. 
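//
// Illustrative note (not part of the XSBench source): in the baseline kernel a single
// warp can mix fuel lookups (many nuclides) with light-material lookups (only a few
// nuclides), so most lanes sit idle while the fuel lanes finish their long nuclide
// loop. With one launch per material, every active lane in a warp runs the same
// num_nucs[m]-iteration loop, at the cost of twelve launches and an early-exit check.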
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_2(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 2 - Material Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Launch all material kernels individually for( int m = 0; m < 12; m++ ) xs_lookup_kernel_optimization_2<<<nblocks, nthreads>>>( in, GSD, m ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_2(Inputs in, SimulationData GSD, int m ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; // Check that our material type matches the kernel material int mat = GSD.mat_samples[i]; if( mat != m ) return; double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } //////////////////////////////////////////////////////////////////////////////////// // Optimization 3 -- Kernel Splitting + Fuel or Not-Fuel Lookups //////////////////////////////////////////////////////////////////////////////////// // This optimization alters Optimization 2. Instead of executing a kernel call for // ALL different material types, only two different calls are made. One for fuel, // and one for all the other materials. As the fuel material has by far the most // isotopes, it takes much longer than the rest. 
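//
// Illustrative note (not part of the XSBench source): since the fuel material dominates
// the runtime, splitting lookups into just "fuel" and "everything else" removes the
// worst of the warp divergence with only two launches instead of twelve; the non-fuel
// launch still mixes the remaining eleven materials within a warp, but those nuclide
// loops are comparatively short.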
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_3(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 3 - Fuel or Other Lookup Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Launch all material kernels individually xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 0 ); xs_lookup_kernel_optimization_3<<<nblocks, nthreads>>>( in, GSD, 1 ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_3(Inputs in, SimulationData GSD, int is_fuel ) { // The lookup ID. 
Used to set the seed, and to store the verification value const int i = blockIdx.x *blockDim.x + threadIdx.x; if( i >= in.lookups ) return; int mat = GSD.mat_samples[i]; // If this is the fuel kernel, AND this is a fuel lookup, then perform a lookup // OR if this is not the fuel kernel, AND this is not a fuel lookup, then perform the lookup if( ((is_fuel == 1) && (mat == 0)) || ((is_fuel == 0) && (mat != 0 ) )) { double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( GSD.p_energy_samples[i], // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in in.n_isotopes, // Total number of isotopes in simulation in.n_gridpoints, // Number of gridpoints per isotope in simulation GSD.num_nucs, // 1-D array with number of nuclides per material GSD.concs, // Flattened 2-D array with concentration of each nuclide in each material GSD.unionized_energy_array, // 1-D Unionized energy array GSD.index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level GSD.nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation GSD.mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) in.grid_type, // Lookup type (nuclide, hash, or unionized) in.hash_bins, // Number of hash bins used (if using hash lookup type) GSD.max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we have each thread // write to its thread_id index in an array, which we will reduce // with a thrust reduction kernel after the main simulation kernel. double max = -1.0; int max_idx = 0; for(int j = 0; j < 5; j++ ) { if( macro_xs_vector[j] > max ) { max = macro_xs_vector[j]; max_idx = j; } } GSD.verification[i] = max_idx+1; } } //////////////////////////////////////////////////////////////////////////////////// // Optimization 4 -- Kernel Splitting + All Material Lookups + Full Sort //////////////////////////////////////////////////////////////////////////////////// // This optimization builds on optimization 2, adding in a full sort before // hand so that the warps should be densely packed together. This should maximize // SIMD efficiency of the kernel, but may incur an added cost for the sort. 
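//
// Illustrative note (not part of the XSBench source): the sort below,
//
//     thrust::sort_by_key(thrust::device, GSD.mat_samples,
//                         GSD.mat_samples + in.lookups, GSD.p_energy_samples);
//
// uses the material IDs as keys and reorders the sampled energies alongside them, so
// after the sort each material occupies one contiguous segment of both arrays, which
// the per-material launches then address via their offset argument.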
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_4(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 4 - All Material Lookup Kernels + Material Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort materials thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Launch all material kernels individually int offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_4(Inputs in, SimulationData GSD, int m, int n_lookups, int offset ) { // The lookup ID. 
// Used to set the seed, and to store the verification value
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if( i >= n_lookups )
		return;

	i += offset;

	// Check that our material type matches the kernel material
	int mat = GSD.mat_samples[i];
	if( mat != m )
		return;

	double macro_xs_vector[5] = {0};

	// Perform macroscopic Cross Section Lookup
	calculate_macro_xs(
			GSD.p_energy_samples[i],    // Sampled neutron energy (in lethargy)
			mat,                        // Sampled material type index neutron is in
			in.n_isotopes,              // Total number of isotopes in simulation
			in.n_gridpoints,            // Number of gridpoints per isotope in simulation
			GSD.num_nucs,               // 1-D array with number of nuclides per material
			GSD.concs,                  // Flattened 2-D array with concentration of each nuclide in each material
			GSD.unionized_energy_array, // 1-D Unionized energy array
			GSD.index_grid,             // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			GSD.nuclide_grid,           // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			GSD.mats,                   // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector,            // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Number of hash bins used (if using hash lookup type)
			GSD.max_num_nucs            // Maximum number of nuclides present in any material
	);

	// For verification, and to prevent the compiler from optimizing
	// all work out, we interrogate the returned macro_xs_vector array
	// to find its maximum value index, then increment the verification
	// value by that index. In this implementation, we have each thread
	// write to its thread_id index in an array, which we will reduce
	// with a thrust reduction kernel after the main simulation kernel.
	double max = -1.0;
	int max_idx = 0;
	for(int j = 0; j < 5; j++ )
	{
		if( macro_xs_vector[j] > max )
		{
			max = macro_xs_vector[j];
			max_idx = j;
		}
	}
	GSD.verification[i] = max_idx+1;
}

////////////////////////////////////////////////////////////////////////////////////
// Optimization 5 -- Kernel Splitting + Fuel/Other Lookups + Fuel/Other Partition
////////////////////////////////////////////////////////////////////////////////////
// This optimization is similar to optimization 4, but instead of sorting
// fully by material, we just partition lookups into fuel and not fuel. Similarly,
// instead of launching kernels for all materials, we only launch kernels for the
// fuel and other materials, as in optimization 3.
//////////////////////////////////////////////////////////////////////////////////// // Comparator for partitioning stage struct is_mat_fuel{ __host__ __device__ bool operator()(const int & a) { return a == 0; } }; unsigned long long run_event_based_simulation_optimization_5(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 5 - Fuel/No Fuel Lookup Kernels + Fuel/No Fuel Sort"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_fuel_lookups = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, 0); // Partition fuel into the first part of the array thrust::partition(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples, is_mat_fuel()); // Launch all material kernels individually (asynchronous is allowed) nblocks = ceil( (double) n_fuel_lookups / (double) nthreads); xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, n_fuel_lookups, 0 ); nblocks = ceil( (double) (in.lookups - n_fuel_lookups) / (double) nthreads); xs_lookup_kernel_optimization_5<<<nblocks, nthreads>>>( in, GSD, in.lookups-n_fuel_lookups, n_fuel_lookups ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; } __global__ void xs_lookup_kernel_optimization_5(Inputs in, SimulationData GSD, int n_lookups, int offset ) { // The lookup ID. 
// Used to set the seed, and to store the verification value
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if( i >= n_lookups )
		return;

	i += offset;

	double macro_xs_vector[5] = {0};

	// Perform macroscopic Cross Section Lookup
	calculate_macro_xs(
			GSD.p_energy_samples[i],    // Sampled neutron energy (in lethargy)
			GSD.mat_samples[i],         // Sampled material type index neutron is in
			in.n_isotopes,              // Total number of isotopes in simulation
			in.n_gridpoints,            // Number of gridpoints per isotope in simulation
			GSD.num_nucs,               // 1-D array with number of nuclides per material
			GSD.concs,                  // Flattened 2-D array with concentration of each nuclide in each material
			GSD.unionized_energy_array, // 1-D Unionized energy array
			GSD.index_grid,             // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level
			GSD.nuclide_grid,           // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation
			GSD.mats,                   // Flattened 2-D array with nuclide indices defining composition of each type of material
			macro_xs_vector,            // 1-D array with result of the macroscopic cross section (5 different reaction channels)
			in.grid_type,               // Lookup type (nuclide, hash, or unionized)
			in.hash_bins,               // Number of hash bins used (if using hash lookup type)
			GSD.max_num_nucs            // Maximum number of nuclides present in any material
	);

	// For verification, and to prevent the compiler from optimizing
	// all work out, we interrogate the returned macro_xs_vector array
	// to find its maximum value index, then increment the verification
	// value by that index. In this implementation, we have each thread
	// write to its thread_id index in an array, which we will reduce
	// with a thrust reduction kernel after the main simulation kernel.
	double max = -1.0;
	int max_idx = 0;
	for(int j = 0; j < 5; j++ )
	{
		if( macro_xs_vector[j] > max )
		{
			max = macro_xs_vector[j];
			max_idx = j;
		}
	}
	GSD.verification[i] = max_idx+1;
}

////////////////////////////////////////////////////////////////////////////////////
// Optimization 6 -- Kernel Splitting + All Material Lookups + Full Sort
// + Energy Sort
////////////////////////////////////////////////////////////////////////////////////
// This optimization builds on optimization 4, adding in a second sort by energy.
// It is extremely fast, as now most of the threads within a warp will be hitting
// the same indices in the lookup grids. This greatly reduces thread divergence and
// greatly improves cache efficiency and re-use.
//
// However, it is unlikely that this exact optimization would be possible in a real
// application like OpenMC. One major difference is that particle objects are quite
// large, often having 50+ variable fields, such that sorting them in memory becomes
// rather expensive. Instead, the best possible option would probably be to create
// intermediate indexing (per Hamilton et al. 2019), and run the kernels indirectly.
//////////////////////////////////////////////////////////////////////////////////// unsigned long long run_event_based_simulation_optimization_6(Inputs in, SimulationData GSD, int mype) { const char * optimization_name = "Optimization 6 - Material & Energy Sorts + Material-specific Kernels"; if( mype == 0) printf("Simulation Kernel:\"%s\"\n", optimization_name); //////////////////////////////////////////////////////////////////////////////// // Allocate Additional Data Structures Needed by Optimized Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Allocating additional device data required by kernel...\n"); size_t sz; size_t total_sz = 0; sz = in.lookups * sizeof(double); gpuErrchk( cudaMalloc((void **) &GSD.p_energy_samples, sz) ); total_sz += sz; GSD.length_p_energy_samples = in.lookups; sz = in.lookups * sizeof(int); gpuErrchk( cudaMalloc((void **) &GSD.mat_samples, sz) ); total_sz += sz; GSD.length_mat_samples = in.lookups; if( mype == 0) printf("Allocated an additional %.0lf MB of data on GPU.\n", total_sz/1024.0/1024.0); //////////////////////////////////////////////////////////////////////////////// // Configure & Launch Simulation Kernel //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Beginning optimized simulation...\n"); int nthreads = 32; int nblocks = ceil( (double) in.lookups / 32.0); sampling_kernel<<<nblocks, nthreads>>>( in, GSD ); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); // Count the number of fuel material lookups that need to be performed (fuel id = 0) int n_lookups_per_material[12]; for( int m = 0; m < 12; m++ ) n_lookups_per_material[m] = thrust::count(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, m); // Sort by material first thrust::sort_by_key(thrust::device, GSD.mat_samples, GSD.mat_samples + in.lookups, GSD.p_energy_samples); // Now, sort each material by energy int offset = 0; for( int m = 0; m < 12; m++ ) { thrust::sort_by_key(thrust::device, GSD.p_energy_samples + offset, GSD.p_energy_samples + offset + n_lookups_per_material[m], GSD.mat_samples + offset); offset += n_lookups_per_material[m]; } // Launch all material kernels individually offset = 0; for( int m = 0; m < 12; m++ ) { nthreads = 32; nblocks = ceil((double) n_lookups_per_material[m] / (double) nthreads); xs_lookup_kernel_optimization_4<<<nblocks, nthreads>>>( in, GSD, m, n_lookups_per_material[m], offset ); offset += n_lookups_per_material[m]; } gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); //////////////////////////////////////////////////////////////////////////////// // Reduce Verification Results //////////////////////////////////////////////////////////////////////////////// if( mype == 0) printf("Reducing verification results...\n"); unsigned long verification_scalar = thrust::reduce(thrust::device, GSD.verification, GSD.verification + in.lookups, 0); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); return verification_scalar; }
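////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original kernels above): the "intermediate
// indexing" idea mentioned in the Optimization 6 notes. Instead of physically
// sorting large particle records, a small index permutation is sorted by
// (material, energy) and the lookup kernels read through it, e.g. j = lookup_indices[i].
// The function name, parameter names, and scratch buffers below are hypothetical;
// this only shows the two-pass key trick (sort by the least-significant key, then
// stable-sort by the most-significant key) using standard Thrust calls.
////////////////////////////////////////////////////////////////////////////////////
#include <thrust/execution_policy.h>
#include <thrust/sequence.h>
#include <thrust/gather.h>
#include <thrust/sort.h>

void build_sorted_lookup_indices(int n_lookups,
                                 const int    * mat_samples_d,      // device: sampled material per lookup
                                 const double * p_energy_samples_d, // device: sampled energy per lookup
                                 int          * lookup_indices_d,   // device: output permutation (n_lookups ints)
                                 double       * energy_keys_d,      // device: scratch (n_lookups doubles)
                                 int          * mat_keys_d)         // device: scratch (n_lookups ints)
{
	// Start from the identity permutation 0, 1, ..., n_lookups-1.
	thrust::sequence(thrust::device, lookup_indices_d, lookup_indices_d + n_lookups);

	// Pass 1 (least-significant key): gather energies in current index order and
	// sort the permutation by energy. The sampled data itself is never moved.
	thrust::gather(thrust::device, lookup_indices_d, lookup_indices_d + n_lookups,
	               p_energy_samples_d, energy_keys_d);
	thrust::sort_by_key(thrust::device, energy_keys_d, energy_keys_d + n_lookups,
	                    lookup_indices_d);

	// Pass 2 (most-significant key): gather materials in the energy-sorted order
	// and stable-sort by material, preserving the energy order within each material.
	thrust::gather(thrust::device, lookup_indices_d, lookup_indices_d + n_lookups,
	               mat_samples_d, mat_keys_d);
	thrust::stable_sort_by_key(thrust::device, mat_keys_d, mat_keys_d + n_lookups,
	                           lookup_indices_d);

	// A lookup kernel would then do:  int j = lookup_indices[i];
	// and index p_energy_samples[j] / mat_samples[j] instead of [i].
}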
decf6e5e83022f8f32a3278ed02060692e4fa4bc.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

// CPU code to do matrix addition
void matrixAdd(int *a, int *b, int *c, int N)
{
	int index;
	for(int col=0; col<N; col++) {
		for(int row=0; row<N; row++) {
			index = row * N + col;
			c[index] = a[index] + b[index];
		}
	}
}

// GPU code to do matrix addition
__global__ void matrixAddKernel(int *a, int *b, int *c, int N)
{
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int index = row * N + col;
	if(col < N && row < N) {
		c[index] = a[index] + b[index];
	}
}

int main(void)
{
	hipSetDevice(3);

	//size of the matrix (the matrix will have NxN elements)
	int N = 2000;

	//2D launch configuration: enough 32x32 thread blocks to cover the full NxN matrix
	dim3 block(32,32,1);
	dim3 grid((N+block.x-1)/block.x, (N+block.y-1)/block.y, 1);

	// pointers to host memory
	int *a_h;
	int *b_h;
	int *c_h;
	int *d_h;

	// pointers to device memory
	int *a_d;
	int *b_d;
	int *c_d;

	// this variable holds the number of bytes required by arrays
	int size;

	// use CUDA events to measure time
	hipEvent_t start;
	hipEvent_t stop;
	float elapsedTime;

	//print out the information about number of blocks and threads
	printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
	printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

	//dynamically allocate host memory and load the arrays with some data
	size = N * N * sizeof(int);
	a_h = (int*) malloc(size);
	b_h = (int*) malloc(size);
	c_h = (int*) malloc(size);
	d_h = (int*) malloc(size);
	for(int i=0; i<N; i++) {
		for(int j=0; j<N; j++) {
			a_h[i * N + j] = i;
			b_h[i * N + j] = i;
		}
	}

	//allocate memory on the device
	hipMalloc((void**)&a_d, size);
	hipMalloc((void**)&b_d, size);
	hipMalloc((void**)&c_d, size);

	//copy the host memory to the device
	hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
	hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
	hipMemcpy(c_d, c_h, size, hipMemcpyHostToDevice);

	//start the timers
	hipEventCreate(&start);
	hipEventCreate(&stop);
	hipEventRecord(start, 0);

	//launch the kernel
	hipLaunchKernelGGL(( matrixAddKernel), dim3(grid), dim3(block), 0, 0, a_d, b_d, c_d, N);

	//stop the timer and print out the execution time
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to calculate results on GPU: %f ms.\n", elapsedTime);

	//copy the results to host
	hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);

	//time to measure CPU performance
	hipEventRecord(start, 0);

	//launch the matrixAdd function that executes on the CPU
	matrixAdd(a_h, b_h, d_h, N);

	//stop the timer and print out the results
	hipEventRecord(stop, 0);
	hipEventSynchronize(stop);
	hipEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to calculate results on CPU: %f ms.\n", elapsedTime);

	//check that GPU and CPU results match
	for(int i=0; i<N*N; i++) {
		if (c_h[i] != d_h[i]) {
			printf("Error: CPU and GPU results do not match\n");
			break;
		}
	}

	// clean up
	free(a_h);
	free(b_h);
	free(c_h);
	free(d_h);
	hipFree(a_d);
	hipFree(b_d);
	hipFree(c_d);
	hipEventDestroy(start);
	hipEventDestroy(stop);

	return 0;
}
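// Illustrative sketch (not part of the original file): the HIP calls above discard
// their return codes. A small checking wrapper, in the spirit of the gpuErrchk macro
// used by the XSBench kernels earlier in this document, makes failures visible.
// The macro name HIP_CHECK is an assumption, not an existing HIP API.
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                         \
	do {                                                                        \
		hipError_t err_ = (call);                                               \
		if (err_ != hipSuccess) {                                               \
			fprintf(stderr, "HIP error %s at %s:%d\n",                          \
			        hipGetErrorString(err_), __FILE__, __LINE__);               \
			exit(EXIT_FAILURE);                                                 \
		}                                                                       \
	} while (0)

// Usage: HIP_CHECK(hipMalloc((void**)&a_d, size));
//        HIP_CHECK(hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice));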
decf6e5e83022f8f32a3278ed02060692e4fa4bc.cu
#include <stdio.h>
#include <cuda.h>

// CPU code to do matrix addition
void matrixAdd(int *a, int *b, int *c, int N)
{
	int index;
	for(int col=0; col<N; col++) {
		for(int row=0; row<N; row++) {
			index = row * N + col;
			c[index] = a[index] + b[index];
		}
	}
}

// GPU code to do matrix addition
__global__ void matrixAddKernel(int *a, int *b, int *c, int N)
{
	int col = threadIdx.x + blockDim.x * blockIdx.x;
	int row = threadIdx.y + blockDim.y * blockIdx.y;
	int index = row * N + col;
	if(col < N && row < N) {
		c[index] = a[index] + b[index];
	}
}

int main(void)
{
	cudaSetDevice(3);

	//size of the matrix (the matrix will have NxN elements)
	int N = 2000;

	//2D launch configuration: enough 32x32 thread blocks to cover the full NxN matrix
	dim3 block(32,32,1);
	dim3 grid((N+block.x-1)/block.x, (N+block.y-1)/block.y, 1);

	// pointers to host memory
	int *a_h;
	int *b_h;
	int *c_h;
	int *d_h;

	// pointers to device memory
	int *a_d;
	int *b_d;
	int *c_d;

	// this variable holds the number of bytes required by arrays
	int size;

	// use CUDA events to measure time
	cudaEvent_t start;
	cudaEvent_t stop;
	float elapsedTime;

	//print out the information about number of blocks and threads
	printf("Number of threads: %i (%ix%i)\n", block.x*block.y, block.x, block.y);
	printf("Number of blocks: %i (%ix%i)\n", grid.x*grid.y, grid.x, grid.y);

	//dynamically allocate host memory and load the arrays with some data
	size = N * N * sizeof(int);
	a_h = (int*) malloc(size);
	b_h = (int*) malloc(size);
	c_h = (int*) malloc(size);
	d_h = (int*) malloc(size);
	for(int i=0; i<N; i++) {
		for(int j=0; j<N; j++) {
			a_h[i * N + j] = i;
			b_h[i * N + j] = i;
		}
	}

	//allocate memory on the device
	cudaMalloc((void**)&a_d, size);
	cudaMalloc((void**)&b_d, size);
	cudaMalloc((void**)&c_d, size);

	//copy the host memory to the device
	cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
	cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
	cudaMemcpy(c_d, c_h, size, cudaMemcpyHostToDevice);

	//start the timers
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);

	//launch the kernel
	matrixAddKernel<<<grid,block>>>(a_d, b_d, c_d, N);

	//stop the timer and print out the execution time
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to calculate results on GPU: %f ms.\n", elapsedTime);

	//copy the results to host
	cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

	//time to measure CPU performance
	cudaEventRecord(start, 0);

	//launch the matrixAdd function that executes on the CPU
	matrixAdd(a_h, b_h, d_h, N);

	//stop the timer and print out the results
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	printf("Time to calculate results on CPU: %f ms.\n", elapsedTime);

	//check that GPU and CPU results match
	for(int i=0; i<N*N; i++) {
		if (c_h[i] != d_h[i]) {
			printf("Error: CPU and GPU results do not match\n");
			break;
		}
	}

	// clean up
	free(a_h);
	free(b_h);
	free(c_h);
	free(d_h);
	cudaFree(a_d);
	cudaFree(b_d);
	cudaFree(c_d);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	return 0;
}
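// Illustrative sketch (not part of the original file): converting the measured
// elapsedTime into an effective memory bandwidth figure. Matrix addition moves
// roughly 3 * N * N * sizeof(int) bytes (two reads and one write per element),
// so for N = 2000 that is 3 * 2000 * 2000 * 4 = 48,000,000 bytes, i.e. 48 MB.
// The helper name reportBandwidth is an assumption, not part of the program above.
#include <stdio.h>

void reportBandwidth(int N, float elapsedTimeMs)
{
	// Bytes moved: read a, read b, write c.
	double bytes = 3.0 * (double)N * (double)N * sizeof(int);
	// elapsedTimeMs is in milliseconds; convert to seconds, then report GB/s.
	double gbPerSec = bytes / (elapsedTimeMs * 1.0e-3) / 1.0e9;
	printf("Effective bandwidth: %.2f GB/s\n", gbPerSec);
}

// Usage after the GPU timing block above: reportBandwidth(N, elapsedTime);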
457b256031e755ab9041bbaa79dfb64227fc4b8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> #include <ATen/native/hip/LaunchUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = btm_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = 
reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]); // flattening cta for pre-computation & smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[i] = 0; } __syncthreads(); int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; top_data = top_data + batch_id * pooled_height * pooled_width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; bottom_data = bottom_data + batch_id * in_stride_n; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int hstart = oh * stride_h - pad_h; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int wstart = ow * stride_w - pad_w; int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { scalar_t val = ptr_input[c*in_stride_c]; if ((scalar_cast<accscalar_t>(val) > out_cached[cached_index]) || THCNumerics<scalar_t>::isnan(val)) { out_cached[cached_index] = scalar_cast<accscalar_t>(val); out_mask_cached[cached_index] = ih * width + iw; } cached_index += blockDim.x; } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; int cached_index = threadIdx.x; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { ptr_output_data[c] = out_cached[cached_index]; ptr_output_mask[c] = out_mask_cached[cached_index]; out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[cached_index] = 0; cached_index += blockDim.x; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index / width; int w = index 
- h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) { for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw + offset] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw + offset]); } } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* bottom_diff) { extern __shared__ int smem[]; accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = accscalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; bottom_diff = bottom_diff + batch_id * height * width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; top_diff = top_diff + batch_id * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); int index_shift = ih * width + iw; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { int cached_index = threadIdx.x; const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { out_cached[cached_index] += scalar_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } scalar_t 
*ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(out_cached[cached_index]); out_cached[cached_index] = accscalar_t(0.0); cached_index += blockDim.x; } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_n = input_.ndimension() == 4 ? input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t)); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); hipLaunchKernelGGL(( max_pool_forward_nhwc<scalar_t, scalar_t>) , dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, output_data, indices_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { const int num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 
BLOCK_THREADS); hipLaunchKernelGGL(( max_pool_forward_nchw<scalar_t, scalar_t>) , dim3(cuda::ATenCeilDiv(count, num_threads)), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); indices.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_n = input.ndimension() == 4 ? 
input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.resize_as_(input); gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); // The backward kernel is launched on input instead output. // If it is launched on output layer, atomic_add would not provide much benefit on FP16. // Please check comments at https://github.com/pytorch/pytorch/pull/34519. 
hipLaunchKernelGGL(( max_pool_backward_nhwc<scalar_t, accscalar_t>) , dim3(grid), dim3(block), shmem_size, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, gradInput_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; hipLaunchKernelGGL(( max_pool_backward_nchw<scalar_t, accscalar_t>) , dim3(grid), dim3(BLOCK_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); C10_HIP_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
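// Illustrative worked example (not part of the ATen source above): the p_start /
// p_end helpers defined near the top of this file compute, for a given input row h,
// the half-open range of pooled output rows whose windows contain h; the backward
// NCHW/NHWC kernels then gather gradients only from that range. With kernel_h = 3,
// stride_h = 2, pad_h = 1, dilation_h = 1, and inputHeight = 8 (so pooledHeight = 4
// with ceil_mode = false), take input row h = 5:
//
//   p_start(5, 1, 3, 1, 2) = (5 + 1 - 3) / 2 + 1 = 2
//   p_end(5, 1, 4, 2)      = min((5 + 1) / 2 + 1, 4) = 4
//
// so only output rows 2 and 3 are scanned: row 2 pools input rows 3..5 and row 3
// pools input rows 5..7, both of which indeed cover h = 5.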
457b256031e755ab9041bbaa79dfb64227fc4b8f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NamedTensorUtils.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> #include <ATen/native/cuda/LaunchUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit #define BLOCK_STRIDE 2 // increasing block_stride to lower # of blocks launched static __device__ inline int p_start(int size, int pad, int kernel, int dilation, int stride) { return (size + pad < ((kernel - 1) * dilation + 1)) ? 0 : (size + pad - ((kernel - 1) * dilation + 1)) / stride + 1; } static __device__ inline int p_end(int size, int pad, int pooled_size, int stride) { return min((size + pad) / stride + 1, pooled_size); } // kernels borrowed from Caffe template <typename scalar_t, typename accscalar_t> __global__ void max_pool_forward_nchw(const int nthreads, const scalar_t* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* top_data, int64_t* top_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; accscalar_t maxval = at::numeric_limits<accscalar_t>::lower_bound(); // -Infinity int maxidx = hstart * width + wstart; const scalar_t* btm_data = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; h += dilation_h) { for (int w = wstart; w < wend; w += dilation_w) { scalar_t val = btm_data[h * width + w]; if ((ScalarConvert<scalar_t, accscalar_t>::to(val) > maxval) || THCNumerics<scalar_t>::isnan(val)) { maxidx = h * width + w; maxval = ScalarConvert<scalar_t, accscalar_t>::to(val); } } } top_data[index] = ScalarConvert<scalar_t, accscalar_t>::to(maxval); top_mask[index] = maxidx; } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_forward_nhwc(const scalar_t* bottom_data, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* top_data, int64_t* top_mask) { extern __shared__ int smem[]; int *out_mask_cached = smem; scalar_t *out_cached = reinterpret_cast<scalar_t*>(&out_mask_cached[kernel_size_C*blockDim.x*blockDim.y*blockDim.z]); // flattening cta for pre-computation & 
smem initialization; int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; // use shared memory to store temporary output value. This is simply to // reduce register usage. for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[i] = 0; } __syncthreads(); int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; top_data = top_data + batch_id * pooled_height * pooled_width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; bottom_data = bottom_data + batch_id * in_stride_n; out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; out_mask_cached = &out_mask_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; int oH = (pooled_height + gridDim.z-1) / gridDim.z; int oW = (pooled_width + gridDim.y-1) / gridDim.y; int ostartH = threadIdx.z + blockIdx.z*oH; int oendH = ::min(ostartH+oH, pooled_height); int ostartW = threadIdx.y + blockIdx.y*oW; int oendW = ::min(ostartW+oW, pooled_width); for (int oh = ostartH; oh < oendH; oh+=blockDim.z) { int hstart = oh * stride_h - pad_h; int hend = min(hstart + (kernel_h - 1) * dilation_h + 1, height); for (int ow = ostartW; ow < oendW; ow+=blockDim.y) { int wstart = ow * stride_w - pad_w; int wend = min(wstart + (kernel_w - 1) * dilation_w + 1, width); while(hstart < 0) hstart += dilation_h; while(wstart < 0) wstart += dilation_w; for (int ih = hstart; ih < hend; ih++) { for (int iw = wstart; iw < wend; iw++) { int cached_index = threadIdx.x; const scalar_t *ptr_input = bottom_data + ih * in_stride_h + iw * in_stride_w; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { scalar_t val = ptr_input[c*in_stride_c]; if ((scalar_cast<accscalar_t>(val) > out_cached[cached_index]) || THCNumerics<scalar_t>::isnan(val)) { out_cached[cached_index] = scalar_cast<accscalar_t>(val); out_mask_cached[cached_index] = ih * width + iw; } cached_index += blockDim.x; } } } scalar_t *ptr_output_data = top_data + (oh * pooled_width + ow) * channels; int64_t *ptr_output_mask = top_mask + (oh * pooled_width + ow) * channels; int cached_index = threadIdx.x; for(int c = channel_offset; c < channels; c+= blockDim.x*kernel_stride_C) { ptr_output_data[c] = out_cached[cached_index]; ptr_output_mask[c] = out_mask_cached[cached_index]; out_cached[cached_index] = at::numeric_limits<scalar_t>::lower_bound(); out_mask_cached[cached_index] = 0; cached_index += blockDim.x; } } } } static const int BLOCK_THREADS = 256; template <typename scalar_t, typename accscalar_t> #if defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 4) #else C10_LAUNCH_BOUNDS_2(BLOCK_THREADS, 8) #endif __global__ void max_pool_backward_nchw(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, scalar_t* bottom_diff) { CUDA_KERNEL_LOOP(index, height*width) { int h = index / width; int w = index - h * width; int phstart = p_start(h, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(h, pad_h, pooled_height, stride_h); int 
pwstart = p_start(w, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(w, pad_w, pooled_width, stride_w); for (int n = blockIdx.y; n < num; n += gridDim.y) { for (int c = blockIdx.z; c < channels; c+= gridDim.z) { accscalar_t gradient = accscalar_t(0); int offset = (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask[ph * pooled_width + pw + offset] == h * width + w) { gradient += ScalarConvert<scalar_t, accscalar_t>::to(top_diff[ph * pooled_width + pw + offset]); } } } bottom_diff[(n*channels+c)*height*width+index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(CUDA_MAX_THREADS) __global__ void max_pool_backward_nhwc(const int nthreads, const scalar_t* top_diff, const int64_t* top_mask, const int nbatch, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const int out_stride_c, const int out_stride_h, const int out_stride_w, const int in_stride_n, const int in_stride_c, const int in_stride_h, const int in_stride_w, const int kernel_stride_C, const int kernel_size_C, scalar_t* bottom_diff) { extern __shared__ int smem[]; accscalar_t *out_cached = reinterpret_cast<accscalar_t*>(smem); int thread_id = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z); int block_size = blockDim.x * blockDim.y * blockDim.z; int batch_id = blockIdx.x % nbatch; int channel_id = blockIdx.x / nbatch; int channel_offset = threadIdx.x + channel_id * blockDim.x; for (int i = thread_id; i < kernel_size_C*blockDim.x*blockDim.y*blockDim.z; i+= block_size) { out_cached[i] = accscalar_t(0.0); } __syncthreads(); out_cached = &out_cached[(threadIdx.z * blockDim.y + threadIdx.y) * kernel_size_C*blockDim.x]; bottom_diff = bottom_diff + batch_id * height * width * channels; top_mask = top_mask + batch_id * pooled_height * pooled_width * channels; top_diff = top_diff + batch_id * pooled_height * pooled_width * channels; int iH = (height + gridDim.z-1) / gridDim.z; int iW = (width + gridDim.y-1) / gridDim.y; int istartH = threadIdx.z + blockIdx.z*iH; int iendH = ::min(istartH+iH, height); int istartW = threadIdx.y + blockIdx.y*iW; int iendW = ::min(istartW+iW, width); for (int ih = istartH; ih < iendH; ih+=blockDim.z) { int phstart = p_start(ih, pad_h, kernel_h, dilation_h, stride_h); int phend = p_end(ih, pad_h, pooled_height, stride_h); for (int iw = istartW; iw < iendW; iw+=blockDim.y) { int pwstart = p_start(iw, pad_w, kernel_w, dilation_w, stride_w); int pwend = p_end(iw, pad_w, pooled_width, stride_w); int index_shift = ih * width + iw; if ((phstart + 1 != phend) || (pwstart + 1 != pwend)) { for(int oh = phstart; oh < phend; ++oh) { for(int ow = pwstart; ow < pwend; ++ow) { int cached_index = threadIdx.x; const int64_t* ptr_top_mask = top_mask + oh * out_stride_h + ow * out_stride_w; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { out_cached[cached_index] += scalar_cast<accscalar_t>(top_diff[oh * out_stride_h + ow * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += 
blockDim.x*kernel_stride_C) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(out_cached[cached_index]); out_cached[cached_index] = accscalar_t(0.0); cached_index += blockDim.x; } } else { const int64_t* ptr_top_mask = top_mask + phstart * out_stride_h + pwstart * out_stride_w; scalar_t *ptr_bottom_diff = bottom_diff + index_shift * channels; int cached_index = threadIdx.x; for (int c = channel_offset; c < channels; c += blockDim.x*kernel_stride_C) { if (ptr_top_mask[c*out_stride_c] == index_shift) { ptr_bottom_diff[c] = scalar_cast<scalar_t>(top_diff[phstart * out_stride_h + pwstart * out_stride_w + c*out_stride_c]); } cached_index += blockDim.x; } } } } } void max_pool2d_with_indices_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg output_arg{ output, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {output_arg, indices_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const int64_t nbatch = input_.ndimension() == 4 ? input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); const int64_t in_stride_n = input_.ndimension() == 4 ? 
input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); indices.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); output.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); indices.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); const int count = safe_downcast<int, int64_t>(output.numel()); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(outputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(outputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(outputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * (sizeof(int) + sizeof(scalar_t)); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); max_pool_forward_nhwc<scalar_t, scalar_t> <<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>( input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, output_data, indices_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { const int num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, BLOCK_THREADS); max_pool_forward_nchw<scalar_t, scalar_t> <<<cuda::ATenCeilDiv(count, num_threads), num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, output_data, indices_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); if(input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); indices.resize_({nInputPlane, outputHeight, outputWidth}); } } void max_pool2d_with_indices_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, const Tensor& indices, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; TensorArg indices_arg{ indices, "indices", 4 }; checkAllSameGPU("max_pool2d_with_indices_out_cuda", {gradInput_arg, gradOutput_arg, input_arg, indices_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "max_pool2d: kernel_size must either be a single int, or a tuple of two ints") const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); // NB: stride default is not expressible as an integer constant, so we accept // empty stride for this case TORCH_CHECK(stride.size() == 0 || stride.size() == 1 || stride.size() == 2, "max_pool2d: stride must either be omitted, a single int, or a tuple of two ints") const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "max_pool2d: padding must be either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(dilation.size() == 1 || dilation.size() == 2, "max_pool2d: dilation must be either a single int, or a tuple of two ints"); const int dilationH = safe_downcast<int, int64_t>(dilation[0]); const int dilationW = dilation.size() == 1 ? dilationH : safe_downcast<int, int64_t>(dilation[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t in_stride_n = input.ndimension() == 4 ? 
input.stride(-4) : 0; const int64_t in_stride_c = input.stride(-3); const int64_t in_stride_h = input.stride(-2); const int64_t in_stride_w = input.stride(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode); max_pool2d_backward_shape_check( input_, gradOutput_, indices, nbatch, kH, kW, dH, dW, padH, padW, dilationH, dilationW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, /*cuda=*/ true); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t out_stride_c = gradOutput.stride(-3); const int64_t out_stride_h = gradOutput.stride(-2); const int64_t out_stride_w = gradOutput.stride(-1); gradInput.resize_as_(input); gradInput.unsafeGetTensorImpl()->empty_tensor_restride(memory_format); int64_t count = input.numel(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "max_pool2d_with_indices_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); int64_t *indices_data = indices.data_ptr<int64_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { const int max_threads = std::min<int>(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, CUDA_MAX_THREADS); int* maxThreadsDim = at::cuda::getCurrentDeviceProperties()->maxThreadsDim; int block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), at::cuda::warp_size())); int block_y = std::min<int>( maxThreadsDim[1], std::min<int>(lastPow2(inputWidth), max_threads / block_x)); int block_z = std::min<int>( maxThreadsDim[2], std::min<int>(lastPow2(inputHeight), max_threads / block_x / block_y)); block_x = std::min<int>( maxThreadsDim[0], std::min<int>(lastPow2(nInputPlane), max_threads / block_y / block_z)); const dim3 block(block_x, block_y, block_z); int kernel_stride_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * 4); int kernel_size_C = cuda::ATenCeilDiv( safe_downcast<int, int64_t>(nInputPlane), block_x * kernel_stride_C); int grid_x = nbatch*kernel_stride_C; int grid_y = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[1], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputWidth), block_y*BLOCK_STRIDE)); int grid_z = std::min<int>( at::cuda::getCurrentDeviceProperties()->maxGridSize[2], cuda::ATenCeilDiv(safe_downcast<int, int64_t>(inputHeight), block_z*BLOCK_STRIDE)); const dim3 grid(grid_x, grid_y, grid_z); size_t shmem_size = (kernel_size_C * block_x*block_y*block_z) * sizeof(accscalar_t); AT_ASSERT(shmem_size <= at::cuda::getCurrentDeviceProperties()->sharedMemPerBlock); // The backward kernel is launched on input instead output. // If it is launched on output layer, atomic_add would not provide much benefit on FP16. // Please check comments at https://github.com/pytorch/pytorch/pull/34519. 
max_pool_backward_nhwc<scalar_t, accscalar_t> <<<grid, block, shmem_size, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, out_stride_c, out_stride_h, out_stride_w, in_stride_n, in_stride_c, in_stride_h, in_stride_w, kernel_stride_C, kernel_size_C, gradInput_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } case MemoryFormat::Contiguous: { int imgcount = inputWidth * inputHeight; dim3 grid; const int blocks = (imgcount + BLOCK_THREADS - 1) / BLOCK_THREADS; grid.x = blocks; grid.y = nbatch; uint64_t maxGridY = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; if (maxGridY < grid.y) grid.y = maxGridY; grid.z = nInputPlane; uint64_t maxGridZ = at::cuda::getCurrentDeviceProperties()->maxGridSize[2]; if (maxGridZ < grid.z) grid.z = maxGridZ; max_pool_backward_nchw<scalar_t, accscalar_t> <<<grid, BLOCK_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, indices_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, dilationH, dilationW, gradInput_data); C10_CUDA_KERNEL_LAUNCH_CHECK(); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); } } // namespace std::tuple<Tensor&, Tensor&> max_pool2d_with_indices_out_cuda( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> max_pool2d_with_indices_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode) { NoNamesGuard guard; Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); max_pool2d_with_indices_out_cuda_template( output, indices, input, kernel_size, stride, padding, dilation, ceil_mode); guard.reset(); namedinference::propagate_names(output, input); namedinference::propagate_names(indices, input); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& max_pool2d_with_indices_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } Tensor max_pool2d_with_indices_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, bool ceil_mode, const Tensor& indices) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); max_pool2d_with_indices_backward_out_cuda_template( gradInput, gradOutput_, input, indices, kernel_size, stride, padding, dilation, ceil_mode); return gradInput; } } // at::native } // at
307c2c624158616ed2fe30e7c6c285d02bba96a7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cstdio"
#include <iostream>
#include <chrono>

constexpr size_t SIZE = 16384 * 3; // 16384 * 3
constexpr size_t BLOCK_COUNT = 4096; //for shared alg 16384 * 3 for simple
constexpr size_t BLOCK_SIZE = SIZE / BLOCK_COUNT;
constexpr size_t THREAD_PER_BLOCK = 128;

/*
 * GPU Elapsed time 23.9948
 * CPU Elapsed time 6720
 */

template<typename T>
__global__ void sumMatrixRowShared(T* matrix, T* result) {
    __shared__ float data[THREAD_PER_BLOCK];
    for (size_t row_num = BLOCK_SIZE * blockIdx.x; row_num < BLOCK_SIZE * (blockIdx.x + 1); row_num++) {
        size_t row_start = row_num * SIZE;
        size_t idx = threadIdx.x;
        // Each thread accumulates a strided partial sum of the row into shared memory
        data[idx] = matrix[row_start + idx];
        for (size_t i = 1; i * THREAD_PER_BLOCK + idx < SIZE; i++) {
            data[idx] = data[idx] + matrix[row_start + i * THREAD_PER_BLOCK + idx];
        }
        __syncthreads();
        // Tree reduction of the partial sums in shared memory
        for (size_t s = 1; s < blockDim.x; s <<= 1) {
            size_t index = 2 * s * idx;
            if (index < blockDim.x)
                data[index] += data[index + s];
            __syncthreads();
        }
        if (idx == 0)
            result[row_num] = data[0];
    }
}

template <typename T>
void sumMatrixRowCPU(const float* matrix, T* result) {
    for (int idx = 0; idx < SIZE; idx++) {
        result[idx] = 0;
        for (size_t i = 0; i < SIZE; i++) {
            result[idx] = result[idx] + matrix[idx * SIZE + i];
        }
    }
}

__host__ int main() {
    // Allocate memory for the vectors
    auto* matrix = new float[SIZE * SIZE];
    auto* result = new float[SIZE];
    auto* result_1 = new float[SIZE];

    // Initialize the vector values
    for (int i = 0; i < SIZE * SIZE; i++) {
        matrix[i] = int(i / SIZE);
        result[i % SIZE] = 0;
        result_1[i % SIZE] = 0;
    }

    float* gpu_matrix;
    float* gpu_result;

    // Allocate memory for the vectors on the GPU
    hipMalloc((void**)&gpu_matrix, sizeof(float) * SIZE * SIZE);
    hipMalloc((void**)&gpu_result, sizeof(float) * SIZE);
    hipMemcpy(gpu_matrix, matrix, sizeof(float) * SIZE * SIZE, hipMemcpyHostToDevice);
    hipMemcpy(gpu_result, result, sizeof(float) * SIZE, hipMemcpyHostToDevice);

    dim3 gridSize = dim3(BLOCK_COUNT, 1, 1);       // Size of the grid used
    dim3 blockSize = dim3(THREAD_PER_BLOCK, 1, 1); // Size of the block used

    // Launch the kernel
    hipEvent_t kernel_start;
    hipEventCreate(&kernel_start);
    hipEventRecord(kernel_start, 0);

    hipLaunchKernelGGL(( sumMatrixRowShared), dim3(gridSize), dim3(blockSize), 0, 0, gpu_matrix, gpu_result);

    hipEvent_t syncEvent;            // Event handle
    hipEventCreate(&syncEvent);      // Create the event
    hipEventRecord(syncEvent, 0);    // Record the event
    hipEventSynchronize(syncEvent);  // Synchronize on the event

    float time;
    hipEventElapsedTime(&time, kernel_start, syncEvent);

    hipMemcpy(result, gpu_result, sizeof(float) * SIZE, hipMemcpyDeviceToHost);
    std::cout << "GPU Elapsed time " << time << std::endl;

    auto t1 = std::chrono::high_resolution_clock::now();
    sumMatrixRowCPU(matrix, result_1);
    auto t2 = std::chrono::high_resolution_clock::now();
    std::cout << "CPU Elapsed time " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << std::endl;

    for (int i = 0; i < 10; i++) {
        printf("Element #%i: %.1f %1.f\n", i, result[i], result_1[i]);
    }

    // Free resources
    hipEventDestroy(syncEvent);
    hipFree(gpu_matrix);
    hipFree(gpu_result);

    delete[] result;
    delete[] matrix;
}
307c2c624158616ed2fe30e7c6c285d02bba96a7.cu
#include "cstdio" #include <iostream> #include <chrono> constexpr size_t SIZE = 16384 * 3; // 16384 * 3 constexpr size_t BLOCK_COUNT = 4096; //for shared alg 16384 * 3 for simple constexpr size_t BLOCK_SIZE = SIZE / BLOCK_COUNT; constexpr size_t THREAD_PER_BLOCK = 128; /* * GPU Elapsed time 23.9948 * CPU Elapsed time 6720 */ template<typename T> __global__ void sumMatrixRowShared(T* matrix, T* result) { __shared__ float data[THREAD_PER_BLOCK]; for(size_t row_num = BLOCK_SIZE * blockIdx.x; row_num < BLOCK_SIZE * (blockIdx.x + 1); row_num++) { size_t row_start = row_num * SIZE; size_t idx = threadIdx.x; data[idx] = matrix[row_start + idx]; for (size_t i=1; i * THREAD_PER_BLOCK + idx < SIZE; i++) { data[idx] = data[idx] + matrix[row_start + i * THREAD_PER_BLOCK + idx]; } __syncthreads(); for (size_t s = 1; s < blockDim.x; s <<= 1 ) { size_t index = 2 * s * idx; if ( index < blockDim.x ) data [index] += data [index + s]; __syncthreads (); } if (idx == 0) result[row_num] = data[0]; } } template <typename T> void sumMatrixRowCPU(const float* matrix, T* result) { for(int idx = 0; idx < SIZE; idx++) { result[idx] = 0; for(size_t i=0; i < SIZE; i++) { result[idx] = result[idx] + matrix[idx * SIZE + i]; } } } __host__ int main() { //Выделяем память под вектора auto* matrix = new float[SIZE * SIZE]; auto* result = new float[SIZE]; auto* result_1 = new float[SIZE]; //Инициализируем значения векторов for (int i = 0; i < SIZE * SIZE; i++) { matrix[i] = int(i/SIZE); result[i%SIZE] = 0; result_1[i%SIZE] = 0; } float* gpu_matrix; float* gpu_result; //Выделяем память для векторов на видеокарте cudaMalloc((void**)&gpu_matrix, sizeof(float) * SIZE * SIZE); cudaMalloc((void**)&gpu_result, sizeof(float) * SIZE); cudaMemcpy(gpu_matrix, matrix, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice); cudaMemcpy(gpu_result, result, sizeof(float) * SIZE, cudaMemcpyHostToDevice); dim3 gridSize = dim3(BLOCK_COUNT, 1, 1); //Размер используемой сетки dim3 blockSize = dim3(THREAD_PER_BLOCK, 1, 1); //Размер используемого блока //Выполняем вызов функции ядра cudaEvent_t kernel_start; cudaEventCreate(&kernel_start); cudaEventRecord(kernel_start, 0); sumMatrixRowShared<<<gridSize, blockSize>>>(gpu_matrix, gpu_result); cudaEvent_t syncEvent; //Дескриптор события cudaEventCreate(&syncEvent); //Создаем event cudaEventRecord(syncEvent, 0); //Записываем event cudaEventSynchronize(syncEvent); //Синхронизируем event float time; cudaEventElapsedTime(&time, kernel_start, syncEvent); cudaMemcpy(result, gpu_result, sizeof(float) * SIZE, cudaMemcpyDeviceToHost); std::cout << "GPU Elapsed time " << time << std::endl; auto t1 = std::chrono::high_resolution_clock::now(); sumMatrixRowCPU(matrix, result_1); auto t2 = std::chrono::high_resolution_clock::now(); std::cout << "CPU Elapsed time " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() <<std::endl; for (int i = 0; i < 10; i++) { printf("Element #%i: %.1f %1.f\n", i , result[i], result_1[i]); } // Освобождаем ресурсы cudaEventDestroy(syncEvent); cudaFree(gpu_matrix); cudaFree(gpu_result); delete[] result; delete[] matrix; }
f1e418c3789ef0da028839fb81b80ffb0e81a7cb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdint.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <ATen/hip/HIPContext.h> #include <torch/torch.h> #include <algorithm> #include <stdexcept> #include <cstdio> #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") #define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") #define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") template <typename T> __host__ __device__ T div_round_up(T val, T divisor) { return (val + divisor - 1) / divisor; } template <typename scalar_t> __global__ void kernel_sh( const scalar_t * __restrict__ inputs, scalar_t * outputs, uint32_t B, uint32_t D, uint32_t C, const bool calc_grad_inputs, scalar_t * dy_dx ) { const uint32_t b = threadIdx.x + blockIdx.x * blockDim.x; if (b >= B) return; const uint32_t C2 = C * C; // locate inputs += b * D; outputs += b * C2; scalar_t x = inputs[0], y = inputs[1], z = inputs[2]; scalar_t xy=x*y, xz=x*z, yz=y*z, x2=x*x, y2=y*y, z2=z*z, xyz=xy*z; scalar_t x4=x2*x2, y4=y2*y2, z4=z2*z2; scalar_t x6=x4*x2, y6=y4*y2, z6=z4*z2; auto write_sh = [&]() { outputs[0] = 0.28209479177387814f ; // 1/(2*sqrt(pi)) if (C <= 1) { return; } outputs[1] = -0.48860251190291987f*y ; // -sqrt(3)*y/(2*sqrt(pi)) outputs[2] = 0.48860251190291987f*z ; // sqrt(3)*z/(2*sqrt(pi)) outputs[3] = -0.48860251190291987f*x ; // -sqrt(3)*x/(2*sqrt(pi)) if (C <= 2) { return; } outputs[4] = 1.0925484305920792f*xy ; // sqrt(15)*xy/(2*sqrt(pi)) outputs[5] = -1.0925484305920792f*yz ; // -sqrt(15)*yz/(2*sqrt(pi)) outputs[6] = 0.94617469575755997f*z2 - 0.31539156525251999f ; // sqrt(5)*(3*z2 - 1)/(4*sqrt(pi)) outputs[7] = -1.0925484305920792f*xz ; // -sqrt(15)*xz/(2*sqrt(pi)) outputs[8] = 0.54627421529603959f*x2 - 0.54627421529603959f*y2 ; // sqrt(15)*(x2 - y2)/(4*sqrt(pi)) if (C <= 3) { return; } outputs[9] = 0.59004358992664352f*y*(-3.0f*x2 + y2) ; // sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) outputs[10] = 2.8906114426405538f*xy*z ; // sqrt(105)*xy*z/(2*sqrt(pi)) outputs[11] = 0.45704579946446572f*y*(1.0f - 5.0f*z2) ; // sqrt(42)*y*(1 - 5*z2)/(8*sqrt(pi)) outputs[12] = 0.3731763325901154f*z*(5.0f*z2 - 3.0f) ; // sqrt(7)*z*(5*z2 - 3)/(4*sqrt(pi)) outputs[13] = 0.45704579946446572f*x*(1.0f - 5.0f*z2) ; // sqrt(42)*x*(1 - 5*z2)/(8*sqrt(pi)) outputs[14] = 1.4453057213202769f*z*(x2 - y2) ; // sqrt(105)*z*(x2 - y2)/(4*sqrt(pi)) outputs[15] = 0.59004358992664352f*x*(-x2 + 3.0f*y2) ; // sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) if (C <= 4) { return; } outputs[16] = 2.5033429417967046f*xy*(x2 - y2) ; // 3*sqrt(35)*xy*(x2 - y2)/(4*sqrt(pi)) outputs[17] = 1.7701307697799304f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(70)*yz*(-3*x2 + y2)/(8*sqrt(pi)) outputs[18] = 0.94617469575756008f*xy*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*xy*(7*z2 - 1)/(4*sqrt(pi)) outputs[19] = 0.66904654355728921f*yz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*yz*(3 - 7*z2)/(8*sqrt(pi)) outputs[20] = -3.1735664074561294f*z2 + 3.7024941420321507f*z4 + 0.31735664074561293f ; // 3*(-30*z2 + 35*z4 + 3)/(16*sqrt(pi)) outputs[21] = 0.66904654355728921f*xz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*xz*(3 - 7*z2)/(8*sqrt(pi)) outputs[22] = 0.47308734787878004f*(x2 - y2)*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*(x2 - y2)*(7*z2 - 1)/(8*sqrt(pi)) 
outputs[23] = 1.7701307697799304f*xz*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*xz*(-x2 + 3*y2)/(8*sqrt(pi)) outputs[24] = -3.7550144126950569f*x2*y2 + 0.62583573544917614f*x4 + 0.62583573544917614f*y4 ; // 3*sqrt(35)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) if (C <= 5) { return; } outputs[25] = 0.65638205684017015f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(154)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) outputs[26] = 8.3026492595241645f*xy*z*(x2 - y2) ; // 3*sqrt(385)*xy*z*(x2 - y2)/(4*sqrt(pi)) outputs[27] = -0.48923829943525038f*y*(3.0f*x2 - y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*y*(3*x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) outputs[28] = 4.7935367849733241f*xy*z*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xy*z*(3*z2 - 1)/(4*sqrt(pi)) outputs[29] = 0.45294665119569694f*y*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*y*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) outputs[30] = 0.1169503224534236f*z*(-70.0f*z2 + 63.0f*z4 + 15.0f) ; // sqrt(11)*z*(-70*z2 + 63*z4 + 15)/(16*sqrt(pi)) outputs[31] = 0.45294665119569694f*x*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*x*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) outputs[32] = 2.3967683924866621f*z*(x2 - y2)*(3.0f*z2 - 1.0f) ; // sqrt(1155)*z*(x2 - y2)*(3*z2 - 1)/(8*sqrt(pi)) outputs[33] = -0.48923829943525038f*x*(x2 - 3.0f*y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*x*(x2 - 3*y2)*(9*z2 - 1)/(32*sqrt(pi)) outputs[34] = 2.0756623148810411f*z*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) outputs[35] = 0.65638205684017015f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(154)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) if (C <= 6) { return; } outputs[36] = 1.3663682103838286f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // sqrt(6006)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) outputs[37] = 2.3666191622317521f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) outputs[38] = 2.0182596029148963f*xy*(x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*xy*(x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) outputs[39] = -0.92120525951492349f*yz*(3.0f*x2 - y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*yz*(3*x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) outputs[40] = 0.92120525951492349f*xy*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*xy*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) outputs[41] = 0.58262136251873131f*yz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*yz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) outputs[42] = 6.6747662381009842f*z2 - 20.024298714302954f*z4 + 14.684485723822165f*z6 - 0.31784601133814211f ; // sqrt(13)*(105*z2 - 315*z4 + 231*z6 - 5)/(32*sqrt(pi)) outputs[43] = 0.58262136251873131f*xz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*xz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) outputs[44] = 0.46060262975746175f*(x2 - y2)*(11.0f*z2*(3.0f*z2 - 1.0f) - 7.0f*z2 + 1.0f) ; // sqrt(2730)*(x2 - y2)*(11*z2*(3*z2 - 1) - 7*z2 + 1)/(64*sqrt(pi)) outputs[45] = -0.92120525951492349f*xz*(x2 - 3.0f*y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*xz*(x2 - 3*y2)*(11*z2 - 3)/(32*sqrt(pi)) outputs[46] = 0.50456490072872406f*(11.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(91)*(11*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) outputs[47] = 2.3666191622317521f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) outputs[48] = 10.247761577878714f*x2*y4 - 10.247761577878714f*x4*y2 + 0.6831841051919143f*x6 - 0.6831841051919143f*y6 ; // sqrt(6006)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) if (C <= 7) { return; } outputs[49] = 0.70716273252459627f*y*(-21.0f*x2*y4 + 35.0f*x4*y2 - 7.0f*x6 + y6) ; // 3*sqrt(715)*y*(-21*x2*y4 + 35*x4*y2 - 7*x6 + y6)/(64*sqrt(pi)) outputs[50] = 5.2919213236038001f*xy*z*(-10.0f*x2*y2 
+ 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*z*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) outputs[51] = -0.51891557872026028f*y*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // -3*sqrt(385)*y*(13*z2 - 1)*(-10*x2*y2 + 5*x4 + y4)/(64*sqrt(pi)) outputs[52] = 4.1513246297620823f*xy*z*(x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xy*z*(x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) outputs[53] = -0.15645893386229404f*y*(3.0f*x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*y*(3*x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) outputs[54] = 0.44253269244498261f*xy*z*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xy*z*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) outputs[55] = 0.090331607582517306f*y*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*y*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) outputs[56] = 0.068284276912004949f*z*(315.0f*z2 - 693.0f*z4 + 429.0f*z6 - 35.0f) ; // sqrt(15)*z*(315*z2 - 693*z4 + 429*z6 - 35)/(32*sqrt(pi)) outputs[57] = 0.090331607582517306f*x*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*x*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) outputs[58] = 0.07375544874083044f*z*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*z*(x2 - y2)*(143*z2*(3*z2 - 1) - 187*z2 + 45)/(64*sqrt(pi)) outputs[59] = -0.15645893386229404f*x*(x2 - 3.0f*y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*x*(x2 - 3*y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) outputs[60] = 1.0378311574405206f*z*(13.0f*z2 - 3.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(13*z2 - 3)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) outputs[61] = -0.51891557872026028f*x*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // -3*sqrt(385)*x*(13*z2 - 1)*(-10*x2*y2 + x4 + 5*y4)/(64*sqrt(pi)) outputs[62] = 2.6459606618019f*z*(15.0f*x2*y4 - 15.0f*x4*y2 + x6 - y6) ; // 3*sqrt(10010)*z*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) outputs[63] = 0.70716273252459627f*x*(-35.0f*x2*y4 + 21.0f*x4*y2 - x6 + 7.0f*y6) ; // 3*sqrt(715)*x*(-35*x2*y4 + 21*x4*y2 - x6 + 7*y6)/(64*sqrt(pi)) }; write_sh(); if (calc_grad_inputs) { scalar_t *dx = dy_dx + b * D * C2; scalar_t *dy = dx + C2; scalar_t *dz = dy + C2; auto write_sh_dx = [&]() { dx[0] = 0.0f ; // 0 if (C <= 1) { return; } dx[1] = 0.0f ; // 0 dx[2] = 0.0f ; // 0 dx[3] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) if (C <= 2) { return; } dx[4] = 1.0925484305920792f*y ; // sqrt(15)*y/(2*sqrt(pi)) dx[5] = 0.0f ; // 0 dx[6] = 0.0f ; // 0 dx[7] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) dx[8] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) if (C <= 3) { return; } dx[9] = -3.5402615395598609f*xy ; // -3*sqrt(70)*xy/(4*sqrt(pi)) dx[10] = 2.8906114426405538f*yz ; // sqrt(105)*yz/(2*sqrt(pi)) dx[11] = 0.0f ; // 0 dx[12] = 0.0f ; // 0 dx[13] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) dx[14] = 2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) dx[15] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) if (C <= 4) { return; } dx[16] = 2.5033429417967046f*y*(3.0f*x2 - y2) ; // 3*sqrt(35)*y*(3*x2 - y2)/(4*sqrt(pi)) dx[17] = -10.620784618679583f*xy*z ; // -9*sqrt(70)*xy*z/(4*sqrt(pi)) dx[18] = 0.94617469575756008f*y*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*y*(7*z2 - 1)/(4*sqrt(pi)) dx[19] = 0.0f ; // 0 dx[20] = 0.0f ; // 0 dx[21] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) dx[22] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) dx[23] = 5.3103923093397913f*z*(-x2 
+ y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) dx[24] = 2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) if (C <= 5) { return; } dx[25] = 13.127641136803401f*xy*(-x2 + y2) ; // 15*sqrt(154)*xy*(-x2 + y2)/(8*sqrt(pi)) dx[26] = 8.3026492595241645f*yz*(3.0f*x2 - y2) ; // 3*sqrt(385)*yz*(3*x2 - y2)/(4*sqrt(pi)) dx[27] = 2.9354297966115022f*xy*(1.0f - 9.0f*z2) ; // 3*sqrt(770)*xy*(1 - 9*z2)/(16*sqrt(pi)) dx[28] = 4.7935367849733241f*yz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*yz*(3*z2 - 1)/(4*sqrt(pi)) dx[29] = 0.0f ; // 0 dx[30] = 0.0f ; // 0 dx[31] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) dx[32] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) dx[33] = -13.209434084751759f*x2*z2 + 1.4677148983057511f*x2 + 13.209434084751759f*y2*z2 - 1.4677148983057511f*y2 ; // 3*sqrt(770)*(-9*x2*z2 + x2 + 9*y2*z2 - y2)/(32*sqrt(pi)) dx[34] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) dx[35] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) if (C <= 6) { return; } dx[36] = 4.0991046311514854f*y*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 3*sqrt(6006)*y*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) dx[37] = 47.332383244635047f*xy*z*(-x2 + y2) ; // 15*sqrt(2002)*xy*z*(-x2 + y2)/(8*sqrt(pi)) dx[38] = 2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) dx[39] = 5.5272315570895412f*xy*z*(3.0f - 11.0f*z2) ; // 3*sqrt(2730)*xy*z*(3 - 11*z2)/(16*sqrt(pi)) dx[40] = 0.92120525951492349f*y*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*y*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dx[41] = 0.0f ; // 0 dx[42] = 0.0f ; // 0 dx[43] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) dx[44] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dx[45] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) dx[46] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) dx[47] = 11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dx[48] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) if (C <= 7) { return; } dx[49] = 9.9002782553443485f*xy*(10.0f*x2*y2 - 3.0f*x4 - 3.0f*y4) ; // 21*sqrt(715)*xy*(10*x2*y2 - 3*x4 - 3*y4)/(32*sqrt(pi)) dx[50] = 15.875763970811402f*yz*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 9*sqrt(10010)*yz*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) dx[51] = -10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // -15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) dx[52] = 4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) dx[53] = 0.93875360317376422f*xy*(66.0f*z2 - 143.0f*z4 - 3.0f) ; // 9*sqrt(35)*xy*(66*z2 - 143*z4 - 3)/(32*sqrt(pi)) dx[54] = 0.44253269244498261f*yz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*yz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) dx[55] = 0.0f ; // 0 dx[56] = 0.0f ; // 0 dx[57] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) dx[58] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 
143*z4 + 15)/(32*sqrt(pi)) dx[59] = 30.97886890473422f*x2*z2 - 67.120882626924143f*x2*z4 - 1.4081304047606462f*x2 - 30.97886890473422f*y2*z2 + 67.120882626924143f*y2*z4 + 1.4081304047606462f*y2 ; // 9*sqrt(35)*(66*x2*z2 - 143*x2*z4 - 3*x2 - 66*y2*z2 + 143*y2*z4 + 3*y2)/(64*sqrt(pi)) dx[60] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) dx[61] = -0.51891557872026028f*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 4.0f*x2*(x2 - 5.0f*y2) + x4 + 5.0f*y4) ; // -3*sqrt(385)*(13*z2 - 1)*(-10*x2*y2 + 4*x2*(x2 - 5*y2) + x4 + 5*y4)/(64*sqrt(pi)) dx[62] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) dx[63] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) }; auto write_sh_dy = [&]() { dy[0] = 0.0f ; // 0 if (C <= 1) { return; } dy[1] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) dy[2] = 0.0f ; // 0 dy[3] = 0.0f ; // 0 if (C <= 2) { return; } dy[4] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) dy[5] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) dy[6] = 0.0f ; // 0 dy[7] = 0.0f ; // 0 dy[8] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) if (C <= 3) { return; } dy[9] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) dy[10] = 2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) dy[11] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) dy[12] = 0.0f ; // 0 dy[13] = 0.0f ; // 0 dy[14] = -2.8906114426405538f*yz ; // -sqrt(105)*yz/(2*sqrt(pi)) dy[15] = 3.5402615395598609f*xy ; // 3*sqrt(70)*xy/(4*sqrt(pi)) if (C <= 4) { return; } dy[16] = 2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) dy[17] = 5.3103923093397913f*z*(-x2 + y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) dy[18] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) dy[19] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) dy[20] = 0.0f ; // 0 dy[21] = 0.0f ; // 0 dy[22] = 0.94617469575756008f*y*(1.0f - 7.0f*z2) ; // 3*sqrt(5)*y*(1 - 7*z2)/(4*sqrt(pi)) dy[23] = 10.620784618679583f*xy*z ; // 9*sqrt(70)*xy*z/(4*sqrt(pi)) dy[24] = 2.5033429417967046f*y*(-3.0f*x2 + y2) ; // 3*sqrt(35)*y*(-3*x2 + y2)/(4*sqrt(pi)) if (C <= 5) { return; } dy[25] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dy[26] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) dy[27] = -1.4677148983057511f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // -3*sqrt(770)*(x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) dy[28] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) dy[29] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) dy[30] = 0.0f ; // 0 dy[31] = 0.0f ; // 0 dy[32] = 4.7935367849733241f*yz*(1.0f - 3.0f*z2) ; // sqrt(1155)*yz*(1 - 3*z2)/(4*sqrt(pi)) dy[33] = 2.9354297966115022f*xy*(9.0f*z2 - 1.0f) ; // 3*sqrt(770)*xy*(9*z2 - 1)/(16*sqrt(pi)) dy[34] = 8.3026492595241645f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(385)*yz*(-3*x2 + y2)/(4*sqrt(pi)) dy[35] = 13.127641136803401f*xy*(x2 - y2) ; // 15*sqrt(154)*xy*(x2 - y2)/(8*sqrt(pi)) if (C <= 6) { return; } dy[36] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 
5*y4)/(32*sqrt(pi)) dy[37] = 11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dy[38] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) dy[39] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) dy[40] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dy[41] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) dy[42] = 0.0f ; // 0 dy[43] = 0.0f ; // 0 dy[44] = 0.92120525951492349f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // sqrt(2730)*y*(18*z2 - 33*z4 - 1)/(32*sqrt(pi)) dy[45] = 5.5272315570895412f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(16*sqrt(pi)) dy[46] = -2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) dy[47] = 47.332383244635047f*xy*z*(x2 - y2) ; // 15*sqrt(2002)*xy*z*(x2 - y2)/(8*sqrt(pi)) dy[48] = 4.0991046311514854f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(6006)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) if (C <= 7) { return; } dy[49] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) dy[50] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) dy[51] = 0.51891557872026028f*(13.0f*z2 - 1.0f)*(10.0f*x2*y2 - 5.0f*x4 + 4.0f*y2*(5.0f*x2 - y2) - y4) ; // 3*sqrt(385)*(13*z2 - 1)*(10*x2*y2 - 5*x4 + 4*y2*(5*x2 - y2) - y4)/(64*sqrt(pi)) dy[52] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) dy[53] = -0.46937680158688211f*(x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -9*sqrt(35)*(x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) dy[54] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) dy[55] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) dy[56] = 0.0f ; // 0 dy[57] = 0.0f ; // 0 dy[58] = 0.44253269244498261f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 3*sqrt(70)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dy[59] = 0.93875360317376422f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 9*sqrt(35)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) dy[60] = -4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // -3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) dy[61] = 10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) dy[62] = 15.875763970811402f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 9*sqrt(10010)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dy[63] = 9.9002782553443485f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 21*sqrt(715)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) }; auto write_sh_dz = [&]() { dz[0] = 0.0f ; // 0 if (C <= 1) { return; } dz[1] = 0.0f ; // 0 dz[2] = 0.48860251190291992f ; // sqrt(3)/(2*sqrt(pi)) dz[3] = 0.0f ; // 0 if (C <= 2) { return; } dz[4] = 0.0f ; // 0 dz[5] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) dz[6] = 1.8923493915151202f*z ; // 3*sqrt(5)*z/(2*sqrt(pi)) dz[7] = -1.0925484305920792f*x ; // -sqrt(15)*x/(2*sqrt(pi)) dz[8] = 0.0f ; // 0 if (C <= 3) { return; } dz[9] = 0.0f ; // 0 dz[10] = 2.8906114426405538f*xy ; // sqrt(105)*xy/(2*sqrt(pi)) 
dz[11] = -4.5704579946446566f*yz ; // -5*sqrt(42)*yz/(4*sqrt(pi)) dz[12] = 5.597644988851731f*z2 - 1.1195289977703462f ; // 3*sqrt(7)*(5*z2 - 1)/(4*sqrt(pi)) dz[13] = -4.5704579946446566f*xz ; // -5*sqrt(42)*xz/(4*sqrt(pi)) dz[14] = 1.4453057213202769f*x2 - 1.4453057213202769f*y2 ; // sqrt(105)*(x2 - y2)/(4*sqrt(pi)) dz[15] = 0.0f ; // 0 if (C <= 4) { return; } dz[16] = 0.0f ; // 0 dz[17] = 1.7701307697799304f*y*(-3.0f*x2 + y2) ; // 3*sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) dz[18] = 13.246445740605839f*xy*z ; // 21*sqrt(5)*xy*z/(2*sqrt(pi)) dz[19] = 2.0071396306718676f*y*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*y*(1 - 7*z2)/(8*sqrt(pi)) dz[20] = 14.809976568128603f*pow(z, 3) - 6.3471328149122579f*z ; // (105*z**3 - 45*z)/(4*sqrt(pi)) dz[21] = 2.0071396306718676f*x*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*x*(1 - 7*z2)/(8*sqrt(pi)) dz[22] = 6.6232228703029197f*z*(x2 - y2) ; // 21*sqrt(5)*z*(x2 - y2)/(4*sqrt(pi)) dz[23] = 1.7701307697799304f*x*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) dz[24] = 0.0f ; // 0 if (C <= 5) { return; } dz[25] = 0.0f ; // 0 dz[26] = 8.3026492595241645f*xy*(x2 - y2) ; // 3*sqrt(385)*xy*(x2 - y2)/(4*sqrt(pi)) dz[27] = 8.8062893898345074f*yz*(-3.0f*x2 + y2) ; // 9*sqrt(770)*yz*(-3*x2 + y2)/(16*sqrt(pi)) dz[28] = 4.7935367849733241f*xy*(9.0f*z2 - 1.0f) ; // sqrt(1155)*xy*(9*z2 - 1)/(4*sqrt(pi)) dz[29] = 12.682506233479513f*yz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*yz*(1 - 3*z2)/(4*sqrt(pi)) dz[30] = -24.559567715218954f*z2 + 36.839351572828434f*z4 + 1.754254836801354f ; // 15*sqrt(11)*(-14*z2 + 21*z4 + 1)/(16*sqrt(pi)) dz[31] = 12.682506233479513f*xz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*xz*(1 - 3*z2)/(4*sqrt(pi)) dz[32] = 2.3967683924866621f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // sqrt(1155)*(x2 - y2)*(9*z2 - 1)/(8*sqrt(pi)) dz[33] = 8.8062893898345074f*xz*(-x2 + 3.0f*y2) ; // 9*sqrt(770)*xz*(-x2 + 3*y2)/(16*sqrt(pi)) dz[34] = -12.453973889286246f*x2*y2 + 2.0756623148810411f*x4 + 2.0756623148810411f*y4 ; // 3*sqrt(385)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) dz[35] = 0.0f ; // 0 if (C <= 6) { return; } dz[36] = 0.0f ; // 0 dz[37] = 2.3666191622317521f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dz[38] = 44.401711264127719f*xy*z*(x2 - y2) ; // 33*sqrt(91)*xy*z*(x2 - y2)/(4*sqrt(pi)) dz[39] = -2.7636157785447706f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*y*(3*x2 - y2)*(11*z2 - 1)/(32*sqrt(pi)) dz[40] = 11.054463114179082f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(8*sqrt(pi)) dz[41] = 2.9131068125936568f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*y*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) dz[42] = 2.6699064952403937f*z*(-30.0f*z2 + 33.0f*z4 + 5.0f) ; // 21*sqrt(13)*z*(-30*z2 + 33*z4 + 5)/(16*sqrt(pi)) dz[43] = 2.9131068125936568f*x*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*x*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) dz[44] = 5.5272315570895412f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(16*sqrt(pi)) dz[45] = -2.7636157785447706f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*x*(x2 - 3*y2)*(11*z2 - 1)/(32*sqrt(pi)) dz[46] = 11.10042781603193f*z*(-6.0f*x2*y2 + x4 + y4) ; // 33*sqrt(91)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) dz[47] = 2.3666191622317521f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) dz[48] = 0.0f ; // 0 if (C <= 7) { return; } dz[49] = 0.0f ; // 0 dz[50] = 5.2919213236038001f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) dz[51] = 13.491805046726766f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 
39*sqrt(385)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dz[52] = 12.453973889286248f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 9*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(8*sqrt(pi)) dz[53] = -6.8841930899409371f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*yz*(3*x2 - y2)*(13*z2 - 3)/(16*sqrt(pi)) dz[54] = 2.2126634622249131f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 15*sqrt(70)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) dz[55] = 1.6259689364853116f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dz[56] = 64.528641681844675f*z2 - 236.60501950009714f*z4 + 205.05768356675085f*z6 - 2.3899496919201733f ; // 7*sqrt(15)*(135*z2 - 495*z4 + 429*z6 - 5)/(32*sqrt(pi)) dz[57] = 1.6259689364853116f*xz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*xz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dz[58] = 0.07375544874083044f*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) + 132.0f*z2*(13.0f*z2 - 5.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*(x2 - y2)*(143*z2*(3*z2 - 1) + 132*z2*(13*z2 - 5) - 187*z2 + 45)/(64*sqrt(pi)) dz[59] = -6.8841930899409371f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*xz*(x2 - 3*y2)*(13*z2 - 3)/(16*sqrt(pi)) dz[60] = 3.1134934723215619f*(13.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 9*sqrt(385)*(13*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) dz[61] = 13.491805046726766f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 39*sqrt(385)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) dz[62] = 39.6894099270285f*x2*y4 - 39.6894099270285f*x4*y2 + 2.6459606618019f*x6 - 2.6459606618019f*y6 ; // 3*sqrt(10010)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) dz[63] = 0.0f ; // 0 }; write_sh_dx(); write_sh_dy(); write_sh_dz(); } } template <typename scalar_t> __global__ void kernel_sh_backward( const scalar_t * __restrict__ grad, const scalar_t * __restrict__ inputs, uint32_t B, uint32_t D, uint32_t C, const scalar_t * __restrict__ dy_dx, scalar_t * grad_inputs ) { const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; const uint32_t b = t / D; if (b >= B) return; const uint32_t d = t - b * D; const uint32_t C2 = C * C; // locate grad += b * C2; dy_dx += b * D * C2 + d * C2; for (int ch = 0; ch < C2; ch++) { grad_inputs[t] += grad[ch] * dy_dx[ch]; //printf("t=%d, b=%d, d=%d, ch=%d, grad=%f (+= %f * %f)\n", t, b, d, ch, grad_inputs[t], grad[ch], dy_dx[ch]); } } // inputs: [B, D], float, in [0, 1] // outputs: [B, L * C], float template <typename scalar_t> void sh_encode_forward_cuda(const scalar_t *inputs, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, const bool calc_grad_inputs, scalar_t *dy_dx) { static constexpr uint32_t N_THREADS = 256; hipLaunchKernelGGL(( kernel_sh<scalar_t>), dim3(div_round_up(B, N_THREADS)), dim3(N_THREADS), 0, 0, inputs, outputs, B, D, C, calc_grad_inputs, dy_dx); } template <typename scalar_t> void sh_encode_backward_cuda(const scalar_t *grad, const scalar_t *inputs, const uint32_t B, const uint32_t D, const uint32_t C, scalar_t *dy_dx, scalar_t *grad_inputs) { static constexpr uint32_t N_THREADS = 256; hipLaunchKernelGGL(( kernel_sh_backward<scalar_t>), dim3(div_round_up(B * D, N_THREADS)), dim3(N_THREADS), 0, 0, grad, inputs, B, D, C, dy_dx, grad_inputs); } void sh_encode_forward(at::Tensor inputs, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const bool calc_grad_inputs, at::Tensor dy_dx) { CHECK_CUDA(inputs); CHECK_CUDA(outputs); CHECK_CUDA(dy_dx); CHECK_CONTIGUOUS(inputs); CHECK_CONTIGUOUS(outputs); CHECK_CONTIGUOUS(dy_dx); CHECK_IS_FLOATING(inputs); CHECK_IS_FLOATING(outputs); CHECK_IS_FLOATING(dy_dx); 
AT_DISPATCH_FLOATING_TYPES_AND_HALF( inputs.scalar_type(), "sh_encode_forward_cuda", ([&] { sh_encode_forward_cuda<scalar_t>(inputs.data_ptr<scalar_t>(), outputs.data_ptr<scalar_t>(), B, D, C, calc_grad_inputs, dy_dx.data_ptr<scalar_t>()); })); } void sh_encode_backward(at::Tensor grad, at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t C, at::Tensor dy_dx, at::Tensor grad_inputs) { CHECK_CUDA(grad); CHECK_CUDA(inputs); CHECK_CUDA(dy_dx); CHECK_CUDA(grad_inputs); CHECK_CONTIGUOUS(grad); CHECK_CONTIGUOUS(inputs); CHECK_CONTIGUOUS(dy_dx); CHECK_CONTIGUOUS(grad_inputs); CHECK_IS_FLOATING(grad); CHECK_IS_FLOATING(inputs); CHECK_IS_FLOATING(dy_dx); CHECK_IS_FLOATING(grad_inputs); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "sh_encode_backward_cuda", ([&] { sh_encode_backward_cuda<scalar_t>(grad.data_ptr<scalar_t>(), inputs.data_ptr<scalar_t>(), B, D, C, dy_dx.data_ptr<scalar_t>(), grad_inputs.data_ptr<scalar_t>()); })); }
f1e418c3789ef0da028839fb81b80ffb0e81a7cb.cu
#include <stdint.h> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <ATen/cuda/CUDAContext.h> #include <torch/torch.h> #include <algorithm> #include <stdexcept> #include <cstdio> #define CHECK_CUDA(x) TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be a contiguous tensor") #define CHECK_IS_INT(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Int, #x " must be an int tensor") #define CHECK_IS_FLOATING(x) TORCH_CHECK(x.scalar_type() == at::ScalarType::Float || x.scalar_type() == at::ScalarType::Half || x.scalar_type() == at::ScalarType::Double, #x " must be a floating tensor") template <typename T> __host__ __device__ T div_round_up(T val, T divisor) { return (val + divisor - 1) / divisor; } template <typename scalar_t> __global__ void kernel_sh( const scalar_t * __restrict__ inputs, scalar_t * outputs, uint32_t B, uint32_t D, uint32_t C, const bool calc_grad_inputs, scalar_t * dy_dx ) { const uint32_t b = threadIdx.x + blockIdx.x * blockDim.x; if (b >= B) return; const uint32_t C2 = C * C; // locate inputs += b * D; outputs += b * C2; scalar_t x = inputs[0], y = inputs[1], z = inputs[2]; scalar_t xy=x*y, xz=x*z, yz=y*z, x2=x*x, y2=y*y, z2=z*z, xyz=xy*z; scalar_t x4=x2*x2, y4=y2*y2, z4=z2*z2; scalar_t x6=x4*x2, y6=y4*y2, z6=z4*z2; auto write_sh = [&]() { outputs[0] = 0.28209479177387814f ; // 1/(2*sqrt(pi)) if (C <= 1) { return; } outputs[1] = -0.48860251190291987f*y ; // -sqrt(3)*y/(2*sqrt(pi)) outputs[2] = 0.48860251190291987f*z ; // sqrt(3)*z/(2*sqrt(pi)) outputs[3] = -0.48860251190291987f*x ; // -sqrt(3)*x/(2*sqrt(pi)) if (C <= 2) { return; } outputs[4] = 1.0925484305920792f*xy ; // sqrt(15)*xy/(2*sqrt(pi)) outputs[5] = -1.0925484305920792f*yz ; // -sqrt(15)*yz/(2*sqrt(pi)) outputs[6] = 0.94617469575755997f*z2 - 0.31539156525251999f ; // sqrt(5)*(3*z2 - 1)/(4*sqrt(pi)) outputs[7] = -1.0925484305920792f*xz ; // -sqrt(15)*xz/(2*sqrt(pi)) outputs[8] = 0.54627421529603959f*x2 - 0.54627421529603959f*y2 ; // sqrt(15)*(x2 - y2)/(4*sqrt(pi)) if (C <= 3) { return; } outputs[9] = 0.59004358992664352f*y*(-3.0f*x2 + y2) ; // sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) outputs[10] = 2.8906114426405538f*xy*z ; // sqrt(105)*xy*z/(2*sqrt(pi)) outputs[11] = 0.45704579946446572f*y*(1.0f - 5.0f*z2) ; // sqrt(42)*y*(1 - 5*z2)/(8*sqrt(pi)) outputs[12] = 0.3731763325901154f*z*(5.0f*z2 - 3.0f) ; // sqrt(7)*z*(5*z2 - 3)/(4*sqrt(pi)) outputs[13] = 0.45704579946446572f*x*(1.0f - 5.0f*z2) ; // sqrt(42)*x*(1 - 5*z2)/(8*sqrt(pi)) outputs[14] = 1.4453057213202769f*z*(x2 - y2) ; // sqrt(105)*z*(x2 - y2)/(4*sqrt(pi)) outputs[15] = 0.59004358992664352f*x*(-x2 + 3.0f*y2) ; // sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) if (C <= 4) { return; } outputs[16] = 2.5033429417967046f*xy*(x2 - y2) ; // 3*sqrt(35)*xy*(x2 - y2)/(4*sqrt(pi)) outputs[17] = 1.7701307697799304f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(70)*yz*(-3*x2 + y2)/(8*sqrt(pi)) outputs[18] = 0.94617469575756008f*xy*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*xy*(7*z2 - 1)/(4*sqrt(pi)) outputs[19] = 0.66904654355728921f*yz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*yz*(3 - 7*z2)/(8*sqrt(pi)) outputs[20] = -3.1735664074561294f*z2 + 3.7024941420321507f*z4 + 0.31735664074561293f ; // 3*(-30*z2 + 35*z4 + 3)/(16*sqrt(pi)) outputs[21] = 0.66904654355728921f*xz*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*xz*(3 - 7*z2)/(8*sqrt(pi)) outputs[22] = 0.47308734787878004f*(x2 - y2)*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*(x2 - y2)*(7*z2 - 1)/(8*sqrt(pi)) outputs[23] = 1.7701307697799304f*xz*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*xz*(-x2 
+ 3*y2)/(8*sqrt(pi)) outputs[24] = -3.7550144126950569f*x2*y2 + 0.62583573544917614f*x4 + 0.62583573544917614f*y4 ; // 3*sqrt(35)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) if (C <= 5) { return; } outputs[25] = 0.65638205684017015f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(154)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) outputs[26] = 8.3026492595241645f*xy*z*(x2 - y2) ; // 3*sqrt(385)*xy*z*(x2 - y2)/(4*sqrt(pi)) outputs[27] = -0.48923829943525038f*y*(3.0f*x2 - y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*y*(3*x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) outputs[28] = 4.7935367849733241f*xy*z*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xy*z*(3*z2 - 1)/(4*sqrt(pi)) outputs[29] = 0.45294665119569694f*y*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*y*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) outputs[30] = 0.1169503224534236f*z*(-70.0f*z2 + 63.0f*z4 + 15.0f) ; // sqrt(11)*z*(-70*z2 + 63*z4 + 15)/(16*sqrt(pi)) outputs[31] = 0.45294665119569694f*x*(14.0f*z2 - 21.0f*z4 - 1.0f) ; // sqrt(165)*x*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) outputs[32] = 2.3967683924866621f*z*(x2 - y2)*(3.0f*z2 - 1.0f) ; // sqrt(1155)*z*(x2 - y2)*(3*z2 - 1)/(8*sqrt(pi)) outputs[33] = -0.48923829943525038f*x*(x2 - 3.0f*y2)*(9.0f*z2 - 1.0f) ; // -sqrt(770)*x*(x2 - 3*y2)*(9*z2 - 1)/(32*sqrt(pi)) outputs[34] = 2.0756623148810411f*z*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) outputs[35] = 0.65638205684017015f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(154)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) if (C <= 6) { return; } outputs[36] = 1.3663682103838286f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // sqrt(6006)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) outputs[37] = 2.3666191622317521f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) outputs[38] = 2.0182596029148963f*xy*(x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*xy*(x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) outputs[39] = -0.92120525951492349f*yz*(3.0f*x2 - y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*yz*(3*x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) outputs[40] = 0.92120525951492349f*xy*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*xy*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) outputs[41] = 0.58262136251873131f*yz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*yz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) outputs[42] = 6.6747662381009842f*z2 - 20.024298714302954f*z4 + 14.684485723822165f*z6 - 0.31784601133814211f ; // sqrt(13)*(105*z2 - 315*z4 + 231*z6 - 5)/(32*sqrt(pi)) outputs[43] = 0.58262136251873131f*xz*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*xz*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) outputs[44] = 0.46060262975746175f*(x2 - y2)*(11.0f*z2*(3.0f*z2 - 1.0f) - 7.0f*z2 + 1.0f) ; // sqrt(2730)*(x2 - y2)*(11*z2*(3*z2 - 1) - 7*z2 + 1)/(64*sqrt(pi)) outputs[45] = -0.92120525951492349f*xz*(x2 - 3.0f*y2)*(11.0f*z2 - 3.0f) ; // -sqrt(2730)*xz*(x2 - 3*y2)*(11*z2 - 3)/(32*sqrt(pi)) outputs[46] = 0.50456490072872406f*(11.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(91)*(11*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) outputs[47] = 2.3666191622317521f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) outputs[48] = 10.247761577878714f*x2*y4 - 10.247761577878714f*x4*y2 + 0.6831841051919143f*x6 - 0.6831841051919143f*y6 ; // sqrt(6006)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) if (C <= 7) { return; } outputs[49] = 0.70716273252459627f*y*(-21.0f*x2*y4 + 35.0f*x4*y2 - 7.0f*x6 + y6) ; // 3*sqrt(715)*y*(-21*x2*y4 + 35*x4*y2 - 7*x6 + y6)/(64*sqrt(pi)) outputs[50] = 5.2919213236038001f*xy*z*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*z*(-10*x2*y2 + 3*x4 + 
3*y4)/(32*sqrt(pi)) outputs[51] = -0.51891557872026028f*y*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // -3*sqrt(385)*y*(13*z2 - 1)*(-10*x2*y2 + 5*x4 + y4)/(64*sqrt(pi)) outputs[52] = 4.1513246297620823f*xy*z*(x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xy*z*(x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) outputs[53] = -0.15645893386229404f*y*(3.0f*x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*y*(3*x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) outputs[54] = 0.44253269244498261f*xy*z*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xy*z*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) outputs[55] = 0.090331607582517306f*y*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*y*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) outputs[56] = 0.068284276912004949f*z*(315.0f*z2 - 693.0f*z4 + 429.0f*z6 - 35.0f) ; // sqrt(15)*z*(315*z2 - 693*z4 + 429*z6 - 35)/(32*sqrt(pi)) outputs[57] = 0.090331607582517306f*x*(-135.0f*z2 + 495.0f*z4 - 429.0f*z6 + 5.0f) ; // sqrt(105)*x*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) outputs[58] = 0.07375544874083044f*z*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*z*(x2 - y2)*(143*z2*(3*z2 - 1) - 187*z2 + 45)/(64*sqrt(pi)) outputs[59] = -0.15645893386229404f*x*(x2 - 3.0f*y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -3*sqrt(35)*x*(x2 - 3*y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) outputs[60] = 1.0378311574405206f*z*(13.0f*z2 - 3.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 3*sqrt(385)*z*(13*z2 - 3)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) outputs[61] = -0.51891557872026028f*x*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // -3*sqrt(385)*x*(13*z2 - 1)*(-10*x2*y2 + x4 + 5*y4)/(64*sqrt(pi)) outputs[62] = 2.6459606618019f*z*(15.0f*x2*y4 - 15.0f*x4*y2 + x6 - y6) ; // 3*sqrt(10010)*z*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) outputs[63] = 0.70716273252459627f*x*(-35.0f*x2*y4 + 21.0f*x4*y2 - x6 + 7.0f*y6) ; // 3*sqrt(715)*x*(-35*x2*y4 + 21*x4*y2 - x6 + 7*y6)/(64*sqrt(pi)) }; write_sh(); if (calc_grad_inputs) { scalar_t *dx = dy_dx + b * D * C2; scalar_t *dy = dx + C2; scalar_t *dz = dy + C2; auto write_sh_dx = [&]() { dx[0] = 0.0f ; // 0 if (C <= 1) { return; } dx[1] = 0.0f ; // 0 dx[2] = 0.0f ; // 0 dx[3] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) if (C <= 2) { return; } dx[4] = 1.0925484305920792f*y ; // sqrt(15)*y/(2*sqrt(pi)) dx[5] = 0.0f ; // 0 dx[6] = 0.0f ; // 0 dx[7] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) dx[8] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) if (C <= 3) { return; } dx[9] = -3.5402615395598609f*xy ; // -3*sqrt(70)*xy/(4*sqrt(pi)) dx[10] = 2.8906114426405538f*yz ; // sqrt(105)*yz/(2*sqrt(pi)) dx[11] = 0.0f ; // 0 dx[12] = 0.0f ; // 0 dx[13] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) dx[14] = 2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) dx[15] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) if (C <= 4) { return; } dx[16] = 2.5033429417967046f*y*(3.0f*x2 - y2) ; // 3*sqrt(35)*y*(3*x2 - y2)/(4*sqrt(pi)) dx[17] = -10.620784618679583f*xy*z ; // -9*sqrt(70)*xy*z/(4*sqrt(pi)) dx[18] = 0.94617469575756008f*y*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*y*(7*z2 - 1)/(4*sqrt(pi)) dx[19] = 0.0f ; // 0 dx[20] = 0.0f ; // 0 dx[21] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) dx[22] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) dx[23] = 5.3103923093397913f*z*(-x2 + y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) dx[24] = 
2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) if (C <= 5) { return; } dx[25] = 13.127641136803401f*xy*(-x2 + y2) ; // 15*sqrt(154)*xy*(-x2 + y2)/(8*sqrt(pi)) dx[26] = 8.3026492595241645f*yz*(3.0f*x2 - y2) ; // 3*sqrt(385)*yz*(3*x2 - y2)/(4*sqrt(pi)) dx[27] = 2.9354297966115022f*xy*(1.0f - 9.0f*z2) ; // 3*sqrt(770)*xy*(1 - 9*z2)/(16*sqrt(pi)) dx[28] = 4.7935367849733241f*yz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*yz*(3*z2 - 1)/(4*sqrt(pi)) dx[29] = 0.0f ; // 0 dx[30] = 0.0f ; // 0 dx[31] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) dx[32] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) dx[33] = -13.209434084751759f*x2*z2 + 1.4677148983057511f*x2 + 13.209434084751759f*y2*z2 - 1.4677148983057511f*y2 ; // 3*sqrt(770)*(-9*x2*z2 + x2 + 9*y2*z2 - y2)/(32*sqrt(pi)) dx[34] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) dx[35] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) if (C <= 6) { return; } dx[36] = 4.0991046311514854f*y*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 3*sqrt(6006)*y*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) dx[37] = 47.332383244635047f*xy*z*(-x2 + y2) ; // 15*sqrt(2002)*xy*z*(-x2 + y2)/(8*sqrt(pi)) dx[38] = 2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) dx[39] = 5.5272315570895412f*xy*z*(3.0f - 11.0f*z2) ; // 3*sqrt(2730)*xy*z*(3 - 11*z2)/(16*sqrt(pi)) dx[40] = 0.92120525951492349f*y*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*y*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dx[41] = 0.0f ; // 0 dx[42] = 0.0f ; // 0 dx[43] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) dx[44] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dx[45] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) dx[46] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) dx[47] = 11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dx[48] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) if (C <= 7) { return; } dx[49] = 9.9002782553443485f*xy*(10.0f*x2*y2 - 3.0f*x4 - 3.0f*y4) ; // 21*sqrt(715)*xy*(10*x2*y2 - 3*x4 - 3*y4)/(32*sqrt(pi)) dx[50] = 15.875763970811402f*yz*(-10.0f*x2*y2 + 5.0f*x4 + y4) ; // 9*sqrt(10010)*yz*(-10*x2*y2 + 5*x4 + y4)/(32*sqrt(pi)) dx[51] = -10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // -15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) dx[52] = 4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) dx[53] = 0.93875360317376422f*xy*(66.0f*z2 - 143.0f*z4 - 3.0f) ; // 9*sqrt(35)*xy*(66*z2 - 143*z4 - 3)/(32*sqrt(pi)) dx[54] = 0.44253269244498261f*yz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*yz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) dx[55] = 0.0f ; // 0 dx[56] = 0.0f ; // 0 dx[57] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) dx[58] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) dx[59] = 
30.97886890473422f*x2*z2 - 67.120882626924143f*x2*z4 - 1.4081304047606462f*x2 - 30.97886890473422f*y2*z2 + 67.120882626924143f*y2*z4 + 1.4081304047606462f*y2 ; // 9*sqrt(35)*(66*x2*z2 - 143*x2*z4 - 3*x2 - 66*y2*z2 + 143*y2*z4 + 3*y2)/(64*sqrt(pi)) dx[60] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) dx[61] = -0.51891557872026028f*(13.0f*z2 - 1.0f)*(-10.0f*x2*y2 + 4.0f*x2*(x2 - 5.0f*y2) + x4 + 5.0f*y4) ; // -3*sqrt(385)*(13*z2 - 1)*(-10*x2*y2 + 4*x2*(x2 - 5*y2) + x4 + 5*y4)/(64*sqrt(pi)) dx[62] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) dx[63] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) }; auto write_sh_dy = [&]() { dy[0] = 0.0f ; // 0 if (C <= 1) { return; } dy[1] = -0.48860251190291992f ; // -sqrt(3)/(2*sqrt(pi)) dy[2] = 0.0f ; // 0 dy[3] = 0.0f ; // 0 if (C <= 2) { return; } dy[4] = 1.0925484305920792f*x ; // sqrt(15)*x/(2*sqrt(pi)) dy[5] = -1.0925484305920792f*z ; // -sqrt(15)*z/(2*sqrt(pi)) dy[6] = 0.0f ; // 0 dy[7] = 0.0f ; // 0 dy[8] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) if (C <= 3) { return; } dy[9] = -1.7701307697799304f*x2 + 1.7701307697799304f*y2 ; // 3*sqrt(70)*(-x2 + y2)/(8*sqrt(pi)) dy[10] = 2.8906114426405538f*xz ; // sqrt(105)*xz/(2*sqrt(pi)) dy[11] = 0.45704579946446572f - 2.2852289973223288f*z2 ; // sqrt(42)*(1 - 5*z2)/(8*sqrt(pi)) dy[12] = 0.0f ; // 0 dy[13] = 0.0f ; // 0 dy[14] = -2.8906114426405538f*yz ; // -sqrt(105)*yz/(2*sqrt(pi)) dy[15] = 3.5402615395598609f*xy ; // 3*sqrt(70)*xy/(4*sqrt(pi)) if (C <= 4) { return; } dy[16] = 2.5033429417967046f*x*(x2 - 3.0f*y2) ; // 3*sqrt(35)*x*(x2 - 3*y2)/(4*sqrt(pi)) dy[17] = 5.3103923093397913f*z*(-x2 + y2) ; // 9*sqrt(70)*z*(-x2 + y2)/(8*sqrt(pi)) dy[18] = 0.94617469575756008f*x*(7.0f*z2 - 1.0f) ; // 3*sqrt(5)*x*(7*z2 - 1)/(4*sqrt(pi)) dy[19] = 0.66904654355728921f*z*(3.0f - 7.0f*z2) ; // 3*sqrt(10)*z*(3 - 7*z2)/(8*sqrt(pi)) dy[20] = 0.0f ; // 0 dy[21] = 0.0f ; // 0 dy[22] = 0.94617469575756008f*y*(1.0f - 7.0f*z2) ; // 3*sqrt(5)*y*(1 - 7*z2)/(4*sqrt(pi)) dy[23] = 10.620784618679583f*xy*z ; // 9*sqrt(70)*xy*z/(4*sqrt(pi)) dy[24] = 2.5033429417967046f*y*(-3.0f*x2 + y2) ; // 3*sqrt(35)*y*(-3*x2 + y2)/(4*sqrt(pi)) if (C <= 5) { return; } dy[25] = 19.6914617052051f*x2*y2 - 3.2819102842008503f*x4 - 3.2819102842008503f*y4 ; // 15*sqrt(154)*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dy[26] = 8.3026492595241645f*xz*(x2 - 3.0f*y2) ; // 3*sqrt(385)*xz*(x2 - 3*y2)/(4*sqrt(pi)) dy[27] = -1.4677148983057511f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // -3*sqrt(770)*(x2 - y2)*(9*z2 - 1)/(32*sqrt(pi)) dy[28] = 4.7935367849733241f*xz*(3.0f*z2 - 1.0f) ; // sqrt(1155)*xz*(3*z2 - 1)/(4*sqrt(pi)) dy[29] = 6.3412531167397574f*z2 - 9.5118796751096362f*z4 - 0.45294665119569694f ; // sqrt(165)*(14*z2 - 21*z4 - 1)/(16*sqrt(pi)) dy[30] = 0.0f ; // 0 dy[31] = 0.0f ; // 0 dy[32] = 4.7935367849733241f*yz*(1.0f - 3.0f*z2) ; // sqrt(1155)*yz*(1 - 3*z2)/(4*sqrt(pi)) dy[33] = 2.9354297966115022f*xy*(9.0f*z2 - 1.0f) ; // 3*sqrt(770)*xy*(9*z2 - 1)/(16*sqrt(pi)) dy[34] = 8.3026492595241645f*yz*(-3.0f*x2 + y2) ; // 3*sqrt(385)*yz*(-3*x2 + y2)/(4*sqrt(pi)) dy[35] = 13.127641136803401f*xy*(x2 - y2) ; // 15*sqrt(154)*xy*(x2 - y2)/(8*sqrt(pi)) if (C <= 6) { return; } dy[36] = 4.0991046311514854f*x*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 3*sqrt(6006)*x*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) dy[37] = 
11.833095811158762f*z*(6.0f*x2*y2 - x4 - y4) ; // 15*sqrt(2002)*z*(6*x2*y2 - x4 - y4)/(32*sqrt(pi)) dy[38] = 2.0182596029148963f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // 3*sqrt(91)*x*(x2 - 3*y2)*(11*z2 - 1)/(8*sqrt(pi)) dy[39] = -2.7636157785447706f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // -3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(32*sqrt(pi)) dy[40] = 0.92120525951492349f*x*(-18.0f*z2 + 33.0f*z4 + 1.0f) ; // sqrt(2730)*x*(-18*z2 + 33*z4 + 1)/(32*sqrt(pi)) dy[41] = 0.58262136251873131f*z*(30.0f*z2 - 33.0f*z4 - 5.0f) ; // sqrt(273)*z*(30*z2 - 33*z4 - 5)/(16*sqrt(pi)) dy[42] = 0.0f ; // 0 dy[43] = 0.0f ; // 0 dy[44] = 0.92120525951492349f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // sqrt(2730)*y*(18*z2 - 33*z4 - 1)/(32*sqrt(pi)) dy[45] = 5.5272315570895412f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(16*sqrt(pi)) dy[46] = -2.0182596029148963f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(91)*y*(3*x2 - y2)*(11*z2 - 1)/(8*sqrt(pi)) dy[47] = 47.332383244635047f*xy*z*(x2 - y2) ; // 15*sqrt(2002)*xy*z*(x2 - y2)/(8*sqrt(pi)) dy[48] = 4.0991046311514854f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(6006)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) if (C <= 7) { return; } dy[49] = -74.252086915082614f*x2*y4 + 74.252086915082614f*x4*y2 - 4.9501391276721742f*x6 + 4.9501391276721742f*y6 ; // 21*sqrt(715)*(-15*x2*y4 + 15*x4*y2 - x6 + y6)/(64*sqrt(pi)) dy[50] = 15.875763970811402f*xz*(-10.0f*x2*y2 + x4 + 5.0f*y4) ; // 9*sqrt(10010)*xz*(-10*x2*y2 + x4 + 5*y4)/(32*sqrt(pi)) dy[51] = 0.51891557872026028f*(13.0f*z2 - 1.0f)*(10.0f*x2*y2 - 5.0f*x4 + 4.0f*y2*(5.0f*x2 - y2) - y4) ; // 3*sqrt(385)*(13*z2 - 1)*(10*x2*y2 - 5*x4 + 4*y2*(5*x2 - y2) - y4)/(64*sqrt(pi)) dy[52] = 4.1513246297620823f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // 3*sqrt(385)*xz*(x2 - 3*y2)*(13*z2 - 3)/(8*sqrt(pi)) dy[53] = -0.46937680158688211f*(x2 - y2)*(13.0f*z2*(11.0f*z2 - 3.0f) - 27.0f*z2 + 3.0f) ; // -9*sqrt(35)*(x2 - y2)*(13*z2*(11*z2 - 3) - 27*z2 + 3)/(64*sqrt(pi)) dy[54] = 0.44253269244498261f*xz*(-110.0f*z2 + 143.0f*z4 + 15.0f) ; // 3*sqrt(70)*xz*(-110*z2 + 143*z4 + 15)/(32*sqrt(pi)) dy[55] = -12.194767023639836f*z2 + 44.714145753346067f*z4 - 38.752259652899923f*z6 + 0.45165803791258652f ; // sqrt(105)*(-135*z2 + 495*z4 - 429*z6 + 5)/(64*sqrt(pi)) dy[56] = 0.0f ; // 0 dy[57] = 0.0f ; // 0 dy[58] = 0.44253269244498261f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 3*sqrt(70)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dy[59] = 0.93875360317376422f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 9*sqrt(35)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) dy[60] = -4.1513246297620823f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // -3*sqrt(385)*yz*(3*x2 - y2)*(13*z2 - 3)/(8*sqrt(pi)) dy[61] = 10.378311574405206f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 15*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(16*sqrt(pi)) dy[62] = 15.875763970811402f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 9*sqrt(10010)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dy[63] = 9.9002782553443485f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 21*sqrt(715)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) }; auto write_sh_dz = [&]() { dz[0] = 0.0f ; // 0 if (C <= 1) { return; } dz[1] = 0.0f ; // 0 dz[2] = 0.48860251190291992f ; // sqrt(3)/(2*sqrt(pi)) dz[3] = 0.0f ; // 0 if (C <= 2) { return; } dz[4] = 0.0f ; // 0 dz[5] = -1.0925484305920792f*y ; // -sqrt(15)*y/(2*sqrt(pi)) dz[6] = 1.8923493915151202f*z ; // 3*sqrt(5)*z/(2*sqrt(pi)) dz[7] = -1.0925484305920792f*x ; // -sqrt(15)*x/(2*sqrt(pi)) dz[8] = 0.0f ; // 0 if (C <= 3) { return; } dz[9] = 0.0f ; // 0 dz[10] = 2.8906114426405538f*xy ; // sqrt(105)*xy/(2*sqrt(pi)) dz[11] = 
-4.5704579946446566f*yz ; // -5*sqrt(42)*yz/(4*sqrt(pi)) dz[12] = 5.597644988851731f*z2 - 1.1195289977703462f ; // 3*sqrt(7)*(5*z2 - 1)/(4*sqrt(pi)) dz[13] = -4.5704579946446566f*xz ; // -5*sqrt(42)*xz/(4*sqrt(pi)) dz[14] = 1.4453057213202769f*x2 - 1.4453057213202769f*y2 ; // sqrt(105)*(x2 - y2)/(4*sqrt(pi)) dz[15] = 0.0f ; // 0 if (C <= 4) { return; } dz[16] = 0.0f ; // 0 dz[17] = 1.7701307697799304f*y*(-3.0f*x2 + y2) ; // 3*sqrt(70)*y*(-3*x2 + y2)/(8*sqrt(pi)) dz[18] = 13.246445740605839f*xy*z ; // 21*sqrt(5)*xy*z/(2*sqrt(pi)) dz[19] = 2.0071396306718676f*y*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*y*(1 - 7*z2)/(8*sqrt(pi)) dz[20] = 14.809976568128603f*pow(z, 3) - 6.3471328149122579f*z ; // (105*z**3 - 45*z)/(4*sqrt(pi)) dz[21] = 2.0071396306718676f*x*(1.0f - 7.0f*z2) ; // 9*sqrt(10)*x*(1 - 7*z2)/(8*sqrt(pi)) dz[22] = 6.6232228703029197f*z*(x2 - y2) ; // 21*sqrt(5)*z*(x2 - y2)/(4*sqrt(pi)) dz[23] = 1.7701307697799304f*x*(-x2 + 3.0f*y2) ; // 3*sqrt(70)*x*(-x2 + 3*y2)/(8*sqrt(pi)) dz[24] = 0.0f ; // 0 if (C <= 5) { return; } dz[25] = 0.0f ; // 0 dz[26] = 8.3026492595241645f*xy*(x2 - y2) ; // 3*sqrt(385)*xy*(x2 - y2)/(4*sqrt(pi)) dz[27] = 8.8062893898345074f*yz*(-3.0f*x2 + y2) ; // 9*sqrt(770)*yz*(-3*x2 + y2)/(16*sqrt(pi)) dz[28] = 4.7935367849733241f*xy*(9.0f*z2 - 1.0f) ; // sqrt(1155)*xy*(9*z2 - 1)/(4*sqrt(pi)) dz[29] = 12.682506233479513f*yz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*yz*(1 - 3*z2)/(4*sqrt(pi)) dz[30] = -24.559567715218954f*z2 + 36.839351572828434f*z4 + 1.754254836801354f ; // 15*sqrt(11)*(-14*z2 + 21*z4 + 1)/(16*sqrt(pi)) dz[31] = 12.682506233479513f*xz*(1.0f - 3.0f*z2) ; // 7*sqrt(165)*xz*(1 - 3*z2)/(4*sqrt(pi)) dz[32] = 2.3967683924866621f*(x2 - y2)*(9.0f*z2 - 1.0f) ; // sqrt(1155)*(x2 - y2)*(9*z2 - 1)/(8*sqrt(pi)) dz[33] = 8.8062893898345074f*xz*(-x2 + 3.0f*y2) ; // 9*sqrt(770)*xz*(-x2 + 3*y2)/(16*sqrt(pi)) dz[34] = -12.453973889286246f*x2*y2 + 2.0756623148810411f*x4 + 2.0756623148810411f*y4 ; // 3*sqrt(385)*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) dz[35] = 0.0f ; // 0 if (C <= 6) { return; } dz[36] = 0.0f ; // 0 dz[37] = 2.3666191622317521f*y*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 3*sqrt(2002)*y*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dz[38] = 44.401711264127719f*xy*z*(x2 - y2) ; // 33*sqrt(91)*xy*z*(x2 - y2)/(4*sqrt(pi)) dz[39] = -2.7636157785447706f*y*(3.0f*x2 - y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*y*(3*x2 - y2)*(11*z2 - 1)/(32*sqrt(pi)) dz[40] = 11.054463114179082f*xy*z*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*xy*z*(11*z2 - 3)/(8*sqrt(pi)) dz[41] = 2.9131068125936568f*y*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*y*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) dz[42] = 2.6699064952403937f*z*(-30.0f*z2 + 33.0f*z4 + 5.0f) ; // 21*sqrt(13)*z*(-30*z2 + 33*z4 + 5)/(16*sqrt(pi)) dz[43] = 2.9131068125936568f*x*(18.0f*z2 - 33.0f*z4 - 1.0f) ; // 5*sqrt(273)*x*(18*z2 - 33*z4 - 1)/(16*sqrt(pi)) dz[44] = 5.5272315570895412f*z*(x2 - y2)*(11.0f*z2 - 3.0f) ; // 3*sqrt(2730)*z*(x2 - y2)*(11*z2 - 3)/(16*sqrt(pi)) dz[45] = -2.7636157785447706f*x*(x2 - 3.0f*y2)*(11.0f*z2 - 1.0f) ; // -3*sqrt(2730)*x*(x2 - 3*y2)*(11*z2 - 1)/(32*sqrt(pi)) dz[46] = 11.10042781603193f*z*(-6.0f*x2*y2 + x4 + y4) ; // 33*sqrt(91)*z*(-6*x2*y2 + x4 + y4)/(16*sqrt(pi)) dz[47] = 2.3666191622317521f*x*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 3*sqrt(2002)*x*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) dz[48] = 0.0f ; // 0 if (C <= 7) { return; } dz[49] = 0.0f ; // 0 dz[50] = 5.2919213236038001f*xy*(-10.0f*x2*y2 + 3.0f*x4 + 3.0f*y4) ; // 3*sqrt(10010)*xy*(-10*x2*y2 + 3*x4 + 3*y4)/(32*sqrt(pi)) dz[51] = 13.491805046726766f*yz*(10.0f*x2*y2 - 5.0f*x4 - y4) ; // 
39*sqrt(385)*yz*(10*x2*y2 - 5*x4 - y4)/(32*sqrt(pi)) dz[52] = 12.453973889286248f*xy*(x2 - y2)*(13.0f*z2 - 1.0f) ; // 9*sqrt(385)*xy*(x2 - y2)*(13*z2 - 1)/(8*sqrt(pi)) dz[53] = -6.8841930899409371f*yz*(3.0f*x2 - y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*yz*(3*x2 - y2)*(13*z2 - 3)/(16*sqrt(pi)) dz[54] = 2.2126634622249131f*xy*(-66.0f*z2 + 143.0f*z4 + 3.0f) ; // 15*sqrt(70)*xy*(-66*z2 + 143*z4 + 3)/(32*sqrt(pi)) dz[55] = 1.6259689364853116f*yz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*yz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dz[56] = 64.528641681844675f*z2 - 236.60501950009714f*z4 + 205.05768356675085f*z6 - 2.3899496919201733f ; // 7*sqrt(15)*(135*z2 - 495*z4 + 429*z6 - 5)/(32*sqrt(pi)) dz[57] = 1.6259689364853116f*xz*(110.0f*z2 - 143.0f*z4 - 15.0f) ; // 9*sqrt(105)*xz*(110*z2 - 143*z4 - 15)/(32*sqrt(pi)) dz[58] = 0.07375544874083044f*(x2 - y2)*(143.0f*z2*(3.0f*z2 - 1.0f) + 132.0f*z2*(13.0f*z2 - 5.0f) - 187.0f*z2 + 45.0f) ; // sqrt(70)*(x2 - y2)*(143*z2*(3*z2 - 1) + 132*z2*(13*z2 - 5) - 187*z2 + 45)/(64*sqrt(pi)) dz[59] = -6.8841930899409371f*xz*(x2 - 3.0f*y2)*(13.0f*z2 - 3.0f) ; // -33*sqrt(35)*xz*(x2 - 3*y2)*(13*z2 - 3)/(16*sqrt(pi)) dz[60] = 3.1134934723215619f*(13.0f*z2 - 1.0f)*(-6.0f*x2*y2 + x4 + y4) ; // 9*sqrt(385)*(13*z2 - 1)*(-6*x2*y2 + x4 + y4)/(32*sqrt(pi)) dz[61] = 13.491805046726766f*xz*(10.0f*x2*y2 - x4 - 5.0f*y4) ; // 39*sqrt(385)*xz*(10*x2*y2 - x4 - 5*y4)/(32*sqrt(pi)) dz[62] = 39.6894099270285f*x2*y4 - 39.6894099270285f*x4*y2 + 2.6459606618019f*x6 - 2.6459606618019f*y6 ; // 3*sqrt(10010)*(15*x2*y4 - 15*x4*y2 + x6 - y6)/(64*sqrt(pi)) dz[63] = 0.0f ; // 0 }; write_sh_dx(); write_sh_dy(); write_sh_dz(); } } template <typename scalar_t> __global__ void kernel_sh_backward( const scalar_t * __restrict__ grad, const scalar_t * __restrict__ inputs, uint32_t B, uint32_t D, uint32_t C, const scalar_t * __restrict__ dy_dx, scalar_t * grad_inputs ) { const uint32_t t = threadIdx.x + blockIdx.x * blockDim.x; const uint32_t b = t / D; if (b >= B) return; const uint32_t d = t - b * D; const uint32_t C2 = C * C; // locate grad += b * C2; dy_dx += b * D * C2 + d * C2; for (int ch = 0; ch < C2; ch++) { grad_inputs[t] += grad[ch] * dy_dx[ch]; //printf("t=%d, b=%d, d=%d, ch=%d, grad=%f (+= %f * %f)\n", t, b, d, ch, grad_inputs[t], grad[ch], dy_dx[ch]); } } // inputs: [B, D], float, in [0, 1] // outputs: [B, L * C], float template <typename scalar_t> void sh_encode_forward_cuda(const scalar_t *inputs, scalar_t *outputs, const uint32_t B, const uint32_t D, const uint32_t C, const bool calc_grad_inputs, scalar_t *dy_dx) { static constexpr uint32_t N_THREADS = 256; kernel_sh<scalar_t><<<div_round_up(B, N_THREADS), N_THREADS>>>(inputs, outputs, B, D, C, calc_grad_inputs, dy_dx); } template <typename scalar_t> void sh_encode_backward_cuda(const scalar_t *grad, const scalar_t *inputs, const uint32_t B, const uint32_t D, const uint32_t C, scalar_t *dy_dx, scalar_t *grad_inputs) { static constexpr uint32_t N_THREADS = 256; kernel_sh_backward<scalar_t><<<div_round_up(B * D, N_THREADS), N_THREADS>>>(grad, inputs, B, D, C, dy_dx, grad_inputs); } void sh_encode_forward(at::Tensor inputs, at::Tensor outputs, const uint32_t B, const uint32_t D, const uint32_t C, const bool calc_grad_inputs, at::Tensor dy_dx) { CHECK_CUDA(inputs); CHECK_CUDA(outputs); CHECK_CUDA(dy_dx); CHECK_CONTIGUOUS(inputs); CHECK_CONTIGUOUS(outputs); CHECK_CONTIGUOUS(dy_dx); CHECK_IS_FLOATING(inputs); CHECK_IS_FLOATING(outputs); CHECK_IS_FLOATING(dy_dx); AT_DISPATCH_FLOATING_TYPES_AND_HALF( inputs.scalar_type(), 
"sh_encode_forward_cuda", ([&] { sh_encode_forward_cuda<scalar_t>(inputs.data_ptr<scalar_t>(), outputs.data_ptr<scalar_t>(), B, D, C, calc_grad_inputs, dy_dx.data_ptr<scalar_t>()); })); } void sh_encode_backward(at::Tensor grad, at::Tensor inputs, const uint32_t B, const uint32_t D, const uint32_t C, at::Tensor dy_dx, at::Tensor grad_inputs) { CHECK_CUDA(grad); CHECK_CUDA(inputs); CHECK_CUDA(dy_dx); CHECK_CUDA(grad_inputs); CHECK_CONTIGUOUS(grad); CHECK_CONTIGUOUS(inputs); CHECK_CONTIGUOUS(dy_dx); CHECK_CONTIGUOUS(grad_inputs); CHECK_IS_FLOATING(grad); CHECK_IS_FLOATING(inputs); CHECK_IS_FLOATING(dy_dx); CHECK_IS_FLOATING(grad_inputs); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad.scalar_type(), "sh_encode_backward_cuda", ([&] { sh_encode_backward_cuda<scalar_t>(grad.data_ptr<scalar_t>(), inputs.data_ptr<scalar_t>(), B, D, C, dy_dx.data_ptr<scalar_t>(), grad_inputs.data_ptr<scalar_t>()); })); }
dffc715a3855fede26aa552dc670ed56826f329f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // #include <math.h> #include "inlined_funcs.h" #include "global.h" #include "config.h" //user function __device__ void compute_bnd_node_flux_kernel_gpu( const int *g, const double *edge_weight, const double *variables_b, double *fluxes_b) { if ((*g) <= 2) { double p_b = variables_b[VAR_DENSITY]; #ifdef IDIVIDE double ip_b = 1.0 / p_b; #endif double pe_b, pressure_b; double3 velocity_b, momentum_b; double flux_contribution_i_momentum_x_b[NDIM], flux_contribution_i_momentum_y_b[NDIM], flux_contribution_i_momentum_z_b[NDIM], flux_contribution_i_density_energy_b[NDIM]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; #ifdef IDIVIDE compute_velocity(ip_b, momentum_b, velocity_b); #else compute_velocity(p_b, momentum_b, velocity_b); #endif double speed_sqd_b = compute_speed_sqd(velocity_b); double speed_b = std::sqrt(speed_sqd_b); pressure_b = compute_pressure(p_b, pe_b, speed_sqd_b); #ifdef IDIVIDE double speed_of_sound_b = compute_speed_of_sound(ip_b, pressure_b); #else double speed_of_sound_b = compute_speed_of_sound(p_b, pressure_b); #endif compute_flux_contribution(p_b, momentum_b, pe_b, pressure_b, velocity_b, flux_contribution_i_momentum_x_b, flux_contribution_i_momentum_y_b, flux_contribution_i_momentum_z_b, flux_contribution_i_density_energy_b); fluxes_b[VAR_DENSITY] += 0; fluxes_b[VAR_MOMENTUM +0] += edge_weight[0]*pressure_b; fluxes_b[VAR_MOMENTUM +1] += edge_weight[1]*pressure_b; fluxes_b[VAR_MOMENTUM +2] += edge_weight[2]*pressure_b; fluxes_b[VAR_DENSITY_ENERGY] += 0; } else if ((*g) == 3 || ((*g) >= 4 && (*g) <= 7) ) { double p_b = variables_b[VAR_DENSITY]; #ifdef IDIVIDE double ip_b = 1.0 / p_b; #endif double pe_b, pressure_b; double3 velocity_b, momentum_b; double flux_contribution_i_momentum_x_b[NDIM], flux_contribution_i_momentum_y_b[NDIM], flux_contribution_i_momentum_z_b[NDIM], flux_contribution_i_density_energy_b[NDIM]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; #ifdef IDIVIDE compute_velocity(ip_b, momentum_b, velocity_b); #else compute_velocity(p_b, momentum_b, velocity_b); #endif double speed_sqd_b = compute_speed_sqd(velocity_b); double speed_b = std::sqrt(speed_sqd_b); pressure_b = compute_pressure(p_b, pe_b, speed_sqd_b); #ifdef IDIVIDE double speed_of_sound_b = compute_speed_of_sound(ip_b, pressure_b); #else double speed_of_sound_b = compute_speed_of_sound(p_b, pressure_b); #endif compute_flux_contribution(p_b, momentum_b, pe_b, pressure_b, velocity_b, flux_contribution_i_momentum_x_b, flux_contribution_i_momentum_y_b, flux_contribution_i_momentum_z_b, flux_contribution_i_density_energy_b); double factor_x = 0.5 * edge_weight[0], factor_y = 0.5 * edge_weight[1], factor_z = 0.5 * edge_weight[2]; fluxes_b[VAR_DENSITY] += factor_x*(ff_variable_cuda[VAR_MOMENTUM+0] + momentum_b.x) + factor_y*(ff_variable_cuda[VAR_MOMENTUM+1] + momentum_b.y) + factor_z*(ff_variable_cuda[VAR_MOMENTUM+2] + momentum_b.z); fluxes_b[VAR_DENSITY_ENERGY] += factor_x*(ff_flux_contribution_density_energy_cuda[0] + flux_contribution_i_density_energy_b[0]) + factor_y*(ff_flux_contribution_density_energy_cuda[1] + flux_contribution_i_density_energy_b[1]) + factor_z*(ff_flux_contribution_density_energy_cuda[2] + 
flux_contribution_i_density_energy_b[2]); fluxes_b[VAR_MOMENTUM + 0] += factor_x*(ff_flux_contribution_momentum_x_cuda[0] + flux_contribution_i_momentum_x_b[0]) + factor_y*(ff_flux_contribution_momentum_x_cuda[1] + flux_contribution_i_momentum_x_b[1]) + factor_z*(ff_flux_contribution_momentum_x_cuda[2] + flux_contribution_i_momentum_x_b[2]); fluxes_b[VAR_MOMENTUM + 1] += factor_x*(ff_flux_contribution_momentum_y_cuda[0] + flux_contribution_i_momentum_y_b[0]) + factor_y*(ff_flux_contribution_momentum_y_cuda[1] + flux_contribution_i_momentum_y_b[1]) + factor_z*(ff_flux_contribution_momentum_y_cuda[2] + flux_contribution_i_momentum_y_b[2]); fluxes_b[VAR_MOMENTUM + 2] += factor_x*(ff_flux_contribution_momentum_z_cuda[0] + flux_contribution_i_momentum_z_b[0]) + factor_y*(ff_flux_contribution_momentum_z_cuda[1] + flux_contribution_i_momentum_z_b[1]) + factor_z*(ff_flux_contribution_momentum_z_cuda[2] + flux_contribution_i_momentum_z_b[2]); } } // CUDA kernel function __global__ void op_cuda_compute_bnd_node_flux_kernel( const double *__restrict ind_arg0, double *__restrict ind_arg1, const int *__restrict opDat2Map, const int *__restrict arg0, const double *__restrict arg1, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg3_l[5]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map2idx; if (n<nelem) { //initialise local variables for ( int d=0; d<5; d++ ){ arg3_l[d] = ZERO_double; } map2idx = opDat2Map[n + offset_b + set_size * 0]; //user-supplied kernel call compute_bnd_node_flux_kernel_gpu(arg0+(n+offset_b)*1, arg1+(n+offset_b)*3, ind_arg0+map2idx*5, arg3_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg3_l[0] += ind_arg1[0+map2idx*5]; arg3_l[1] += ind_arg1[1+map2idx*5]; arg3_l[2] += ind_arg1[2+map2idx*5]; arg3_l[3] += ind_arg1[3+map2idx*5]; arg3_l[4] += ind_arg1[4+map2idx*5]; ind_arg1[0+map2idx*5] = arg3_l[0]; ind_arg1[1+map2idx*5] = arg3_l[1]; ind_arg1[2+map2idx*5] = arg3_l[2]; ind_arg1[3+map2idx*5] = arg3_l[3]; ind_arg1[4+map2idx*5] = arg3_l[4]; } __syncthreads(); } } } //host stub function void op_par_loop_compute_bnd_node_flux_kernel(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(10); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[10].name = name; OP_kernels[10].count += 1; int ninds = 2; int inds[4] = {-1,-1,0,1}; if (OP_diags>2) { printf(" kernel routine with indirection: compute_bnd_node_flux_kernel\n"); } //get plan #ifdef OP_PART_SIZE_10 int part_size = OP_PART_SIZE_10; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set_size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ 
if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_10 int nthread = OP_BLOCK_SIZE_10; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { hipLaunchKernelGGL(( op_cuda_compute_bnd_node_flux_kernel), dim3(nblocks),dim3(nthread), 0, 0, (double *)arg2.data_d, (double *)arg3.data_d, arg2.map_data_d, (int*)arg0.data_d, (double*)arg1.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[10].transfer += Plan->transfer; OP_kernels[10].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[10].time += wall_t2 - wall_t1; }
dffc715a3855fede26aa552dc670ed56826f329f.cu
// // auto-generated by op2.py // #include <math.h> #include "inlined_funcs.h" #include "global.h" #include "config.h" //user function __device__ void compute_bnd_node_flux_kernel_gpu( const int *g, const double *edge_weight, const double *variables_b, double *fluxes_b) { if ((*g) <= 2) { double p_b = variables_b[VAR_DENSITY]; #ifdef IDIVIDE double ip_b = 1.0 / p_b; #endif double pe_b, pressure_b; double3 velocity_b, momentum_b; double flux_contribution_i_momentum_x_b[NDIM], flux_contribution_i_momentum_y_b[NDIM], flux_contribution_i_momentum_z_b[NDIM], flux_contribution_i_density_energy_b[NDIM]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; #ifdef IDIVIDE compute_velocity(ip_b, momentum_b, velocity_b); #else compute_velocity(p_b, momentum_b, velocity_b); #endif double speed_sqd_b = compute_speed_sqd(velocity_b); double speed_b = std::sqrt(speed_sqd_b); pressure_b = compute_pressure(p_b, pe_b, speed_sqd_b); #ifdef IDIVIDE double speed_of_sound_b = compute_speed_of_sound(ip_b, pressure_b); #else double speed_of_sound_b = compute_speed_of_sound(p_b, pressure_b); #endif compute_flux_contribution(p_b, momentum_b, pe_b, pressure_b, velocity_b, flux_contribution_i_momentum_x_b, flux_contribution_i_momentum_y_b, flux_contribution_i_momentum_z_b, flux_contribution_i_density_energy_b); fluxes_b[VAR_DENSITY] += 0; fluxes_b[VAR_MOMENTUM +0] += edge_weight[0]*pressure_b; fluxes_b[VAR_MOMENTUM +1] += edge_weight[1]*pressure_b; fluxes_b[VAR_MOMENTUM +2] += edge_weight[2]*pressure_b; fluxes_b[VAR_DENSITY_ENERGY] += 0; } else if ((*g) == 3 || ((*g) >= 4 && (*g) <= 7) ) { double p_b = variables_b[VAR_DENSITY]; #ifdef IDIVIDE double ip_b = 1.0 / p_b; #endif double pe_b, pressure_b; double3 velocity_b, momentum_b; double flux_contribution_i_momentum_x_b[NDIM], flux_contribution_i_momentum_y_b[NDIM], flux_contribution_i_momentum_z_b[NDIM], flux_contribution_i_density_energy_b[NDIM]; momentum_b.x = variables_b[VAR_MOMENTUM+0]; momentum_b.y = variables_b[VAR_MOMENTUM+1]; momentum_b.z = variables_b[VAR_MOMENTUM+2]; pe_b = variables_b[VAR_DENSITY_ENERGY]; #ifdef IDIVIDE compute_velocity(ip_b, momentum_b, velocity_b); #else compute_velocity(p_b, momentum_b, velocity_b); #endif double speed_sqd_b = compute_speed_sqd(velocity_b); double speed_b = std::sqrt(speed_sqd_b); pressure_b = compute_pressure(p_b, pe_b, speed_sqd_b); #ifdef IDIVIDE double speed_of_sound_b = compute_speed_of_sound(ip_b, pressure_b); #else double speed_of_sound_b = compute_speed_of_sound(p_b, pressure_b); #endif compute_flux_contribution(p_b, momentum_b, pe_b, pressure_b, velocity_b, flux_contribution_i_momentum_x_b, flux_contribution_i_momentum_y_b, flux_contribution_i_momentum_z_b, flux_contribution_i_density_energy_b); double factor_x = 0.5 * edge_weight[0], factor_y = 0.5 * edge_weight[1], factor_z = 0.5 * edge_weight[2]; fluxes_b[VAR_DENSITY] += factor_x*(ff_variable_cuda[VAR_MOMENTUM+0] + momentum_b.x) + factor_y*(ff_variable_cuda[VAR_MOMENTUM+1] + momentum_b.y) + factor_z*(ff_variable_cuda[VAR_MOMENTUM+2] + momentum_b.z); fluxes_b[VAR_DENSITY_ENERGY] += factor_x*(ff_flux_contribution_density_energy_cuda[0] + flux_contribution_i_density_energy_b[0]) + factor_y*(ff_flux_contribution_density_energy_cuda[1] + flux_contribution_i_density_energy_b[1]) + factor_z*(ff_flux_contribution_density_energy_cuda[2] + flux_contribution_i_density_energy_b[2]); fluxes_b[VAR_MOMENTUM + 0] += 
factor_x*(ff_flux_contribution_momentum_x_cuda[0] + flux_contribution_i_momentum_x_b[0]) + factor_y*(ff_flux_contribution_momentum_x_cuda[1] + flux_contribution_i_momentum_x_b[1]) + factor_z*(ff_flux_contribution_momentum_x_cuda[2] + flux_contribution_i_momentum_x_b[2]); fluxes_b[VAR_MOMENTUM + 1] += factor_x*(ff_flux_contribution_momentum_y_cuda[0] + flux_contribution_i_momentum_y_b[0]) + factor_y*(ff_flux_contribution_momentum_y_cuda[1] + flux_contribution_i_momentum_y_b[1]) + factor_z*(ff_flux_contribution_momentum_y_cuda[2] + flux_contribution_i_momentum_y_b[2]); fluxes_b[VAR_MOMENTUM + 2] += factor_x*(ff_flux_contribution_momentum_z_cuda[0] + flux_contribution_i_momentum_z_b[0]) + factor_y*(ff_flux_contribution_momentum_z_cuda[1] + flux_contribution_i_momentum_z_b[1]) + factor_z*(ff_flux_contribution_momentum_z_cuda[2] + flux_contribution_i_momentum_z_b[2]); } } // CUDA kernel function __global__ void op_cuda_compute_bnd_node_flux_kernel( const double *__restrict ind_arg0, double *__restrict ind_arg1, const int *__restrict opDat2Map, const int *__restrict arg0, const double *__restrict arg1, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg3_l[5]; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map2idx; if (n<nelem) { //initialise local variables for ( int d=0; d<5; d++ ){ arg3_l[d] = ZERO_double; } map2idx = opDat2Map[n + offset_b + set_size * 0]; //user-supplied kernel call compute_bnd_node_flux_kernel_gpu(arg0+(n+offset_b)*1, arg1+(n+offset_b)*3, ind_arg0+map2idx*5, arg3_l); col2 = colors[n+offset_b]; } //store local variables for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg3_l[0] += ind_arg1[0+map2idx*5]; arg3_l[1] += ind_arg1[1+map2idx*5]; arg3_l[2] += ind_arg1[2+map2idx*5]; arg3_l[3] += ind_arg1[3+map2idx*5]; arg3_l[4] += ind_arg1[4+map2idx*5]; ind_arg1[0+map2idx*5] = arg3_l[0]; ind_arg1[1+map2idx*5] = arg3_l[1]; ind_arg1[2+map2idx*5] = arg3_l[2]; ind_arg1[3+map2idx*5] = arg3_l[3]; ind_arg1[4+map2idx*5] = arg3_l[4]; } __syncthreads(); } } } //host stub function void op_par_loop_compute_bnd_node_flux_kernel(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3){ int nargs = 4; op_arg args[4]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(10); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[10].name = name; OP_kernels[10].count += 1; int ninds = 2; int inds[4] = {-1,-1,0,1}; if (OP_diags>2) { printf(" kernel routine with indirection: compute_bnd_node_flux_kernel\n"); } //get plan #ifdef OP_PART_SIZE_10 int part_size = OP_PART_SIZE_10; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set_size > 0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } 
#ifdef OP_BLOCK_SIZE_10 int nthread = OP_BLOCK_SIZE_10; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { op_cuda_compute_bnd_node_flux_kernel<<<nblocks,nthread>>>( (double *)arg2.data_d, (double *)arg3.data_d, arg2.map_data_d, (int*)arg0.data_d, (double*)arg1.data_d, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[10].transfer += Plan->transfer; OP_kernels[10].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[10].time += wall_t2 - wall_t1; }
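// The op_par_loop host stub above caps gridDim.x at 65535 and spills any
// remaining blocks into gridDim.y, and the kernel rebuilds a linear block id
// with blockIdx.x + blockIdx.y*gridDim.x, returning early for padding blocks.
// The stand-alone sketch below reproduces just that launch pattern so it can
// be read in isolation; names and sizes here are illustrative, not from OP2.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void linear_block_id_kernel(int *out, int nblocks) {
    int block = blockIdx.x + blockIdx.y * gridDim.x;  // flatten the 2D grid
    if (block >= nblocks) return;                     // skip padding blocks
    if (threadIdx.x == 0) out[block] = block;
}

int main() {
    const int nblocks = 70000;   // deliberately above the 65535 x-dimension cap
    const int nthread = 128;
    dim3 grid(nblocks >= (1 << 16) ? 65535 : nblocks,
              nblocks >= (1 << 16) ? (nblocks - 1) / 65535 + 1 : 1, 1);

    int *d_out = nullptr;
    cudaMalloc((void**)&d_out, nblocks * sizeof(int));
    linear_block_id_kernel<<<grid, nthread>>>(d_out, nblocks);
    cudaDeviceSynchronize();

    int last = -1;
    cudaMemcpy(&last, d_out + nblocks - 1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("last linear block id = %d (expected %d)\n", last, nblocks - 1);
    cudaFree(d_out);
    return 0;
}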
eabcb04c6472d3a251d83f5c852537fbc788d701.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> // The device kernel __global__ void my_first_kernel(float *x) { int tid = threadIdx.x + blockDim.x * blockIdx.x; x[tid] = (float)threadIdx.x; } int main() { // Setup float *hx, *dx; int blocks = 2; int threads = 8; int size = blocks*threads; // Allocate host and device memory hx = (float*)malloc(size * sizeof(float)); hipMalloc((void**)&dx, size * sizeof(float)); // Execute kernel my_first_kernel << <blocks, threads >> > (dx); // Copy device memory back to host memory hipMemcpy(hx, dx, size * sizeof(float), hipMemcpyDeviceToHost); // Output results for (int i = 0; i < size; i++) { printf(" n, x = %d %f\n", i, hx[i]); } // Free memory hipFree(dx); free(hx); }
eabcb04c6472d3a251d83f5c852537fbc788d701.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> // The device kernel __global__ void my_first_kernel(float *x) { int tid = threadIdx.x + blockDim.x * blockIdx.x; x[tid] = (float)threadIdx.x; } int main() { // Setup float *hx, *dx; int blocks = 2; int threads = 8; int size = blocks*threads; // Allocate host and device memory hx = (float*)malloc(size * sizeof(float)); cudaMalloc((void**)&dx, size * sizeof(float)); // Execute kernel my_first_kernel << <blocks, threads >> > (dx); // Copy device memory back to host memory cudaMemcpy(hx, dx, size * sizeof(float), cudaMemcpyDeviceToHost); // Output results for (int i = 0; i < size; i++) { printf(" n, x = %d %f\n", i, hx[i]); } // Free memory cudaFree(dx); free(hx); }
734d64267da2edce33166972738ce0b8c3ce4008.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2013 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <inttypes.h> #include <signal.h> #include <sys/inotify.h> // Listens for incoming files to process #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <sodium.h> // For random byte generation #include <unistd.h> #include "hashcash.gpu.h" // Struct and macro definitions #include "hip/hip_runtime.h" // GPU orientated functionality #include "hashcash.gpu.init.c" // Initialisation functions kept separately /* * For long inputs, we can compute the first blocks of the hash on the CPU */ inline uint16_t hashcash_sha_block(unsigned char * model,uint32_t * buffer,uint16_t offset) { uint16_t i; memset(buffer,0,64); for(i = offset;i < offset + 64;i++) { if(model[i] == 0) return i - offset; buffer[(i >> 2) & 15] += (model[i] << (24 - ((i & 3) * 8))); } return 64; } /* * Process incoming files with work to do */ int hashcash_process_file(char * filename_in) { hipSetDevice(0); hipDeviceReset(); // Defaults, per file // If you wanted to customise this per file, perhaps put the nbits variable in the filename and parse that. uint8_t nbits = settings->nbits; // An intermediate processing file is used. 
This way, an additional inotify could be placed on the --output folder to deal with completed jobs char * base = basename(filename_in); char * filename_proc = NULL; if(-1 == asprintf(&filename_proc,"%s%s",settings->process_folder,base)) return -1; // Open the files to read from and write to FILE *fpin = fopen(filename_in,"r"); FILE *fpout = fopen(filename_proc,"w"); if(fpin == NULL) { fprintf(stderr,"Invalid input file %s\n",filename_in); exit(0); } if(fpout == NULL) { fprintf(stderr,"Invalid output file %s\n",filename_proc); exit(0); } // If verbose, some stats will be written to STDOUT #ifdef VERBOSE uint64_t total_computed = 0; uint64_t total_rows = 0; float total_time = 0.0000; printf("\nSHA computed\tLoop time (ms)\t\tRate (Mhash/s)\n"); #endif while(!feof(fpin)) { unsigned char model[320] = {0}; char date[7]; char address[257]; memset(address,0,257); memset(model,0,320); memset(date,0,7); // If there is a line that has an email address longer than 255 chars, or a date that is not 6 chars in length, we're going to ignore it // Proper formatting of the date is up to the user (should by YYMMDD) if(fscanf(fpin,"%255[^\t]\t%6[^\n]\n",address,date) != 2) continue; // A unique string, used so that emails sent to the user on the same day have a unique hashcash token char randstr[10]; randombytes_buf(randstr,10); for(int i = 0;i < 10;i++) randstr[i] = hexchar[(randstr[i] & 63)]; uint32_t model_len = sprintf((char *) model,"1:%d:%s:%s::%.*s:",nbits,date,address,10,randstr); // Need to pad awkwardly lengthed inputs so the CPU can preprocess all but the last block if((model_len & 63) > 50) { while(model_len & 63) model[model_len++] = '0'; } // Add the model string to an 16*uint32_t buffer uint32_t state[5] = {0x67452301,0xEFCDAB89,0x98BADCFE,0x10325476,0xC3D2E1F0}; uint32_t buffer[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; // If the model is >56 bytes, the first blocks need processed, this can be done trivially on the CPU uint16_t curpos; uint16_t cur_block = 0; while((curpos = hashcash_sha_block(model,buffer,cur_block++ * 64)) == 64) sha_transform(state,buffer); // Space for counter model_len += 5; // Add length to last 8 bytes if((buffer[15] += model_len << 3) < (model_len << 3)) buffer[15]++; buffer[14] += (model_len >> 29); // Space for counter uint8_t bytes[5] = {0,0,0,0,0}; for(int i = 0;i < 5;i++) bytes[i] = curpos++ >> 2; // Pad with 128, string already padded with 0's. 
buffer[curpos>>2] |= (128 << (24 - ((curpos & 3)* 8))); // Memory to copy to and from the GPU uint8_t *d_bytes; uint32_t *d_state,*d_buffer,*d_answer; unsigned long long int *d_increments; uint32_t answer = 0; unsigned long long int increments = 0; // Create and assign some memory on the CUDA device CUDA_E(hipMalloc((void**)&d_state,20)); CUDA_E(hipMalloc((void**)&d_buffer,64)); CUDA_E(hipMalloc((void**)&d_increments,8)); CUDA_E(hipMalloc((void**)&d_answer,4)); CUDA_E(hipMalloc((void**)&d_bytes,5)); CUDA_E(hipMemcpy((void *)d_state, state,20, hipMemcpyHostToDevice)); CUDA_E(hipMemcpy((void *)d_buffer,buffer,64, hipMemcpyHostToDevice)); CUDA_E(hipMemcpy((void *)d_increments, &increments,8,hipMemcpyHostToDevice)); CUDA_E(hipMemcpy((void *)d_answer, &answer,4, hipMemcpyHostToDevice)); CUDA_E(hipMemcpy((void *)d_bytes, &bytes,5, hipMemcpyHostToDevice)); // hashit() with timing hipEvent_t start, stop; float time; CUDA_E(hipEventCreate(&start)); CUDA_E(hipEventCreate(&stop)); CUDA_E(hipEventRecord(start, 0)); hipLaunchKernelGGL(( hashIt), dim3(settings->ngroups),dim3(settings->nthreads), 0, 0, d_bytes,d_answer,d_increments,d_state,d_buffer,settings->nthreads * settings->ngroups,(1 << (32 - nbits)) - 1); CUDA_E(hipEventRecord(stop, 0)); CUDA_E(hipGetLastError()); CUDA_E(hipDeviceSynchronize()); CUDA_E(hipMemcpy(&increments,d_increments,sizeof(unsigned long long int), hipMemcpyDeviceToHost)); CUDA_E(hipMemcpy(&answer,d_answer,sizeof(uint32_t), hipMemcpyDeviceToHost)); CUDA_E(hipEventElapsedTime(&time, start, stop)); CUDA_E(hipEventDestroy(start)); CUDA_E(hipEventDestroy(stop)); // Free up assigned memory CUDA_E(hipFree((void*)d_state)); CUDA_E(hipFree((void*)d_buffer)); CUDA_E(hipFree((void*)d_increments)); CUDA_E(hipFree((void*)d_answer)); CUDA_E(hipFree((void*)d_bytes)); for(int i = 0;i < 5;i++) { model[--model_len] = hexchar[answer & 0x3F]; answer >>= 6; } fprintf(fpout,"%s\n",model); #ifdef VERBOSE printf("%12llu\t%14.5f\t%20.10f\n",increments,time,increments/(time*1000)); total_computed += increments; total_time += time; ++total_rows; #endif } #ifdef VERBOSE printf("JOB %s DONE\n%lu hashes computed in %f seconds from %lu rows, averaging %f seconds per row.\n",filename_in,total_computed,total_time / 1000,total_rows,(total_time / 1000) / total_rows); #endif fclose(fpin); fclose(fpout); // Move the output to its final destination char * filename_out = NULL; if(-1 == asprintf(&filename_out,"%s%s",settings->output_folder,base)) return -1; rename(filename_proc,filename_out); // Cleanup unlink(filename_in); free(filename_in); free(filename_proc); free(filename_out); return 0; } int main (int argc, char** argv) { // Declare default settings settings = (struct settings *) calloc(1,sizeof(struct settings)); settings->nbits = 26; settings->nthreads = 256; settings->ngroups = 128; args_init(argc,argv); // CUDA config hipDeviceSetCacheConfig(hipFuncCachePreferShared); hipGetDeviceCount(&settings->devices); // Inotify preparation inotify = (struct inotify *) calloc(1,sizeof(struct inotify)); inotify->fd = inotify_init(); strcpy(inotify->watchfolder,settings->input_folder); inotify->type = IN_CLOSE_WRITE | IN_MOVED_TO; inotify->wd = inotify_add_watch(inotify->fd,inotify->watchfolder,inotify->type); // An endless loop of inotify notifications while(1) { // Wait for a new notification memset(inotify->buffer,0,EVENT_BUF_LEN); int length = read(inotify->fd,inotify->buffer,EVENT_BUF_LEN); if(length < 0) break; int i = 0; // Process 1 or more notifications that have been sent here while (i < length) { 
inotify->event = ( struct inotify_event * ) &inotify->buffer[i]; if(inotify->event->len && !(inotify->event->mask & IN_ISDIR)) { char * eventname = NULL; if(-1 != asprintf(&eventname,"%s%s",settings->input_folder,inotify->event->name)) { // Process the incoming file (*hashcash_process_file)(eventname); // You could install a signal handler to break out of this endless loop by setting settings->exit to a non-zero value if(settings->exit) break; } else free(eventname); } i += EVENT_SIZE + inotify->event->len; } if(settings->exit) break; } // If we ever do arrive here, here's the cleanup inotify_rm_watch(inotify->fd,inotify->wd); close(inotify->fd); free(inotify); free(settings->input_folder); free(settings->output_folder); free(settings->process_folder); free(settings); }
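// hashcash_sha_block() and the block setup above pack the ASCII model string
// into big-endian 32-bit words: byte i lands in buffer[(i >> 2) & 15] shifted
// by 24 - 8*(i & 3) bits, a 0x80 terminator byte follows the message, and the
// message length in bits goes into words 14 and 15 of the final block. The
// host-only sketch below replays that padding on a short string purely as an
// illustration (it is not part of the hashcash miner itself).
#include <cstdio>
#include <cstdint>
#include <cstring>

int main() {
    const char *msg = "abc";              // short enough to fit one SHA-1 block
    uint32_t block[16] = {0};
    uint32_t len = (uint32_t)strlen(msg);

    // pack message bytes, big-endian within each 32-bit word
    for (uint32_t i = 0; i < len; i++)
        block[(i >> 2) & 15] |= (uint32_t)(unsigned char)msg[i] << (24 - 8 * (i & 3));

    // append the 0x80 terminator byte immediately after the message
    block[(len >> 2) & 15] |= 0x80u << (24 - 8 * (len & 3));

    // message length in bits, split across the last two words (high word first)
    uint64_t bits = (uint64_t)len * 8;
    block[14] = (uint32_t)(bits >> 32);
    block[15] = (uint32_t)(bits & 0xffffffffu);

    for (int w = 0; w < 16; w++) printf("block[%2d] = 0x%08x\n", w, block[w]);
    // "abc" gives block[0] = 0x61626380 and block[15] = 0x00000018, the
    // canonical single-block SHA-1 padding for the three-byte test vector.
    return 0;
}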
734d64267da2edce33166972738ce0b8c3ce4008.cu
/* * Copyright (c) 2013 * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <inttypes.h> #include <signal.h> #include <sys/inotify.h> // Listens for incoming files to process #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <sodium.h> // For random byte generation #include <unistd.h> #include "hashcash.gpu.h" // Struct and macro definitions #include "cuda.h" // GPU orientated functionality #include "hashcash.gpu.init.c" // Initialisation functions kept separately /* * For long inputs, we can compute the first blocks of the hash on the CPU */ inline uint16_t hashcash_sha_block(unsigned char * model,uint32_t * buffer,uint16_t offset) { uint16_t i; memset(buffer,0,64); for(i = offset;i < offset + 64;i++) { if(model[i] == 0) return i - offset; buffer[(i >> 2) & 15] += (model[i] << (24 - ((i & 3) * 8))); } return 64; } /* * Process incoming files with work to do */ int hashcash_process_file(char * filename_in) { cudaSetDevice(0); cudaDeviceReset(); // Defaults, per file // If you wanted to customise this per file, perhaps put the nbits variable in the filename and parse that. uint8_t nbits = settings->nbits; // An intermediate processing file is used. 
This way, an additional inotify could be placed on the --output folder to deal with completed jobs char * base = basename(filename_in); char * filename_proc = NULL; if(-1 == asprintf(&filename_proc,"%s%s",settings->process_folder,base)) return -1; // Open the files to read from and write to FILE *fpin = fopen(filename_in,"r"); FILE *fpout = fopen(filename_proc,"w"); if(fpin == NULL) { fprintf(stderr,"Invalid input file %s\n",filename_in); exit(0); } if(fpout == NULL) { fprintf(stderr,"Invalid output file %s\n",filename_proc); exit(0); } // If verbose, some stats will be written to STDOUT #ifdef VERBOSE uint64_t total_computed = 0; uint64_t total_rows = 0; float total_time = 0.0000; printf("\nSHA computed\tLoop time (ms)\t\tRate (Mhash/s)\n"); #endif while(!feof(fpin)) { unsigned char model[320] = {0}; char date[7]; char address[257]; memset(address,0,257); memset(model,0,320); memset(date,0,7); // If there is a line that has an email address longer than 255 chars, or a date that is not 6 chars in length, we're going to ignore it // Proper formatting of the date is up to the user (should by YYMMDD) if(fscanf(fpin,"%255[^\t]\t%6[^\n]\n",address,date) != 2) continue; // A unique string, used so that emails sent to the user on the same day have a unique hashcash token char randstr[10]; randombytes_buf(randstr,10); for(int i = 0;i < 10;i++) randstr[i] = hexchar[(randstr[i] & 63)]; uint32_t model_len = sprintf((char *) model,"1:%d:%s:%s::%.*s:",nbits,date,address,10,randstr); // Need to pad awkwardly lengthed inputs so the CPU can preprocess all but the last block if((model_len & 63) > 50) { while(model_len & 63) model[model_len++] = '0'; } // Add the model string to an 16*uint32_t buffer uint32_t state[5] = {0x67452301,0xEFCDAB89,0x98BADCFE,0x10325476,0xC3D2E1F0}; uint32_t buffer[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; // If the model is >56 bytes, the first blocks need processed, this can be done trivially on the CPU uint16_t curpos; uint16_t cur_block = 0; while((curpos = hashcash_sha_block(model,buffer,cur_block++ * 64)) == 64) sha_transform(state,buffer); // Space for counter model_len += 5; // Add length to last 8 bytes if((buffer[15] += model_len << 3) < (model_len << 3)) buffer[15]++; buffer[14] += (model_len >> 29); // Space for counter uint8_t bytes[5] = {0,0,0,0,0}; for(int i = 0;i < 5;i++) bytes[i] = curpos++ >> 2; // Pad with 128, string already padded with 0's. 
buffer[curpos>>2] |= (128 << (24 - ((curpos & 3)* 8))); // Memory to copy to and from the GPU uint8_t *d_bytes; uint32_t *d_state,*d_buffer,*d_answer; unsigned long long int *d_increments; uint32_t answer = 0; unsigned long long int increments = 0; // Create and assign some memory on the CUDA device CUDA_E(cudaMalloc((void**)&d_state,20)); CUDA_E(cudaMalloc((void**)&d_buffer,64)); CUDA_E(cudaMalloc((void**)&d_increments,8)); CUDA_E(cudaMalloc((void**)&d_answer,4)); CUDA_E(cudaMalloc((void**)&d_bytes,5)); CUDA_E(cudaMemcpy((void *)d_state, state,20, cudaMemcpyHostToDevice)); CUDA_E(cudaMemcpy((void *)d_buffer,buffer,64, cudaMemcpyHostToDevice)); CUDA_E(cudaMemcpy((void *)d_increments, &increments,8,cudaMemcpyHostToDevice)); CUDA_E(cudaMemcpy((void *)d_answer, &answer,4, cudaMemcpyHostToDevice)); CUDA_E(cudaMemcpy((void *)d_bytes, &bytes,5, cudaMemcpyHostToDevice)); // hashit() with timing cudaEvent_t start, stop; float time; CUDA_E(cudaEventCreate(&start)); CUDA_E(cudaEventCreate(&stop)); CUDA_E(cudaEventRecord(start, 0)); hashIt<<<settings->ngroups,settings->nthreads>>>(d_bytes,d_answer,d_increments,d_state,d_buffer,settings->nthreads * settings->ngroups,(1 << (32 - nbits)) - 1); CUDA_E(cudaEventRecord(stop, 0)); CUDA_E(cudaGetLastError()); CUDA_E(cudaThreadSynchronize()); CUDA_E(cudaMemcpy(&increments,d_increments,sizeof(unsigned long long int), cudaMemcpyDeviceToHost)); CUDA_E(cudaMemcpy(&answer,d_answer,sizeof(uint32_t), cudaMemcpyDeviceToHost)); CUDA_E(cudaEventElapsedTime(&time, start, stop)); CUDA_E(cudaEventDestroy(start)); CUDA_E(cudaEventDestroy(stop)); // Free up assigned memory CUDA_E(cudaFree((void*)d_state)); CUDA_E(cudaFree((void*)d_buffer)); CUDA_E(cudaFree((void*)d_increments)); CUDA_E(cudaFree((void*)d_answer)); CUDA_E(cudaFree((void*)d_bytes)); for(int i = 0;i < 5;i++) { model[--model_len] = hexchar[answer & 0x3F]; answer >>= 6; } fprintf(fpout,"%s\n",model); #ifdef VERBOSE printf("%12llu\t%14.5f\t%20.10f\n",increments,time,increments/(time*1000)); total_computed += increments; total_time += time; ++total_rows; #endif } #ifdef VERBOSE printf("JOB %s DONE\n%lu hashes computed in %f seconds from %lu rows, averaging %f seconds per row.\n",filename_in,total_computed,total_time / 1000,total_rows,(total_time / 1000) / total_rows); #endif fclose(fpin); fclose(fpout); // Move the output to its final destination char * filename_out = NULL; if(-1 == asprintf(&filename_out,"%s%s",settings->output_folder,base)) return -1; rename(filename_proc,filename_out); // Cleanup unlink(filename_in); free(filename_in); free(filename_proc); free(filename_out); return 0; } int main (int argc, char** argv) { // Declare default settings settings = (struct settings *) calloc(1,sizeof(struct settings)); settings->nbits = 26; settings->nthreads = 256; settings->ngroups = 128; args_init(argc,argv); // CUDA config cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cudaGetDeviceCount(&settings->devices); // Inotify preparation inotify = (struct inotify *) calloc(1,sizeof(struct inotify)); inotify->fd = inotify_init(); strcpy(inotify->watchfolder,settings->input_folder); inotify->type = IN_CLOSE_WRITE | IN_MOVED_TO; inotify->wd = inotify_add_watch(inotify->fd,inotify->watchfolder,inotify->type); // An endless loop of inotify notifications while(1) { // Wait for a new notification memset(inotify->buffer,0,EVENT_BUF_LEN); int length = read(inotify->fd,inotify->buffer,EVENT_BUF_LEN); if(length < 0) break; int i = 0; // Process 1 or more notifications that have been sent here while (i < length) { 
inotify->event = ( struct inotify_event * ) &inotify->buffer[i]; if(inotify->event->len && !(inotify->event->mask & IN_ISDIR)) { char * eventname = NULL; if(-1 != asprintf(&eventname,"%s%s",settings->input_folder,inotify->event->name)) { // Process the incoming file (*hashcash_process_file)(eventname); // You could install a signal handler to break out of this endless loop by setting settings->exit to a non-zero value if(settings->exit) break; } else free(eventname); } i += EVENT_SIZE + inotify->event->len; } if(settings->exit) break; } // If we ever do arrive here, here's the cleanup inotify_rm_watch(inotify->fd,inotify->wd); close(inotify->fd); free(inotify); free(settings->input_folder); free(settings->output_folder); free(settings->process_folder); free(settings); }
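// Both hashcash variants above wrap every runtime call in CUDA_E(...), whose
// definition sits in hashcash.gpu.h and is not reproduced in this file. The
// macro below is only a guess at a compatible definition: evaluate the call
// once and abort with a readable message if it did not return cudaSuccess.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#ifndef CUDA_E
#define CUDA_E(expr)                                                          \
    do {                                                                      \
        cudaError_t cuda_e_err_ = (expr);                                     \
        if (cuda_e_err_ != cudaSuccess) {                                     \
            fprintf(stderr, "%s failed: %s (%s:%d)\n", #expr,                 \
                    cudaGetErrorString(cuda_e_err_), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)
#endif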
0d1b012e7005135a4230781922590e3ac3fea1ec.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <sstream>
#include <fstream>
#include <string.h>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;

/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 384
#define H 15
#define W 15
#define R 3
#define S 3
#define M 384
#define E 13
#define F 13
#define U 1

__global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt, int num_img, int num_ch)
{
  int row = threadIdx.y;
  int col = threadIdx.x;
  if((row<height) && (col<width))
  {
    for (int i=0; i<wt_width; i++){
      for (int j=0; j<wt_width; j++){
        for(int k=0; k<num_ch; k++){
          d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)]*d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)];
        }
      }
    }
    if(d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]<0)
      d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = 0;
  }
}

void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
  int x,y,i,j,m,n,k;
  for(n=0; n<batch_size; n++){
    for (m=0 ; m<M; m++){
      for (x=0; x<F; x++){
        for(y=0; y<E; y++){
          // OP[x][y] = 0; // adding bias to output
          for (i=0; i<R; i++){
            for (j=0; j<S; j++){
              for(k=0; k<C; k++){
                float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
                float wt = weight[m*C*R*S+k*R*S+i*S+j];
                float prod = ip*wt;
                if(prod >=0) output[n*E*F*M+m*E*F+x*E+y] += prod;
                //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
              }
            }
          }
        }
      }
    }
  }
}

int main(int argc, char* argv[])
{
  int batch_size = atoi(argv[1]);

  /*************INITALIZING MATRICES*********************************/
  float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W];
  float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E];
  float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
  float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S];
  float* d_o;
  float* d_i;
  float* d_w;
  //clock_t cpu_start, gpu_start, cpu_end, gpu_end;
  //int a,b,c,d;
  int c,d,m,n,k;

  /*INITIALIZING WEIGHT MATRIX*/
  for (m=0; m<M; m++){
    for(k=0;k<C;k++){
      for (c=0; c<R; c++){
        for(d=0; d<S; d++){
          //WT[c][d] = 2.0;
          WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  /*INITIALIZING OUTPUT MATRIX*/
  for (n=0; n<batch_size;n++){
    for (m=0; m<M; m++){
      for (c=0; c<F; c++){
        for(d=0; d<E; d++){
          //OP[c][d] = 0;
          OP[n*M*F*E+m*F*E+c*E+d] = 0;
        }
      }
    }
  }

  /*INITIALIZING INPUT MATRIX*/
  for (n=0; n<batch_size; n++){
    for(k=0;k<C;k++){
      for (c=0; c<H; c++){
        for(d=0; d<W; d++){
          // IP[c][d] = (a+b+c+d);
          if ((c==0) || (d==0) || (c==14) || (d==14))
            IP[n*C*H*W+k*H*W+c*W+d] = 0;
          else
            IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float));
  hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice);
  hipMalloc((void**) &d_w, M*C*R*S*sizeof(float));
  hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice);
  hipMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));

  //cpu_start = clock();
  clock_t start, end;
  start = clock();
  //element_wise_mmul(OP, IP, WT, batch_size);
  end = clock();
  //printf("cpu time is %f secs \n", (float)(end-start)/CLOCKS_PER_SEC);

  dim3 dimGrid(batch_size,384,1);
  dim3 dimBlock(13,13,1);
  //gpu_start = clock();
  hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,13,13,1,15,3,384,batch_size,384);
  //gpu_end = clock();
  hipMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost);

  int g,h,s,u;
  float max_error = 0;
  string filename = "layer_4_"+to_string(batch_size);
  ifstream fin(filename.c_str());
  string line ;
  for (u=0;u<batch_size;u++){
    for (s=0;s<M;s++){
      for (g=0; g<F; g++){
        for(h=0; h<E; h++){
          getline(fin,line);
          float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
          // float error =abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
          if (error > max_error) max_error = error;
          // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
          // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
          // printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
        }
      }
    }
  }
  fin.close();
  printf("max error = %f\n", max_error);
  //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;

  hipFree(d_o);
  hipFree(d_i);
  hipFree(d_w);
  free(OPG);
  free(IP);
  free(WT);
  free(OP);
  return 0;
}
0d1b012e7005135a4230781922590e3ac3fea1ec.cu
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <sstream>
#include <fstream>
#include <string.h>
//#include <bits/stdc++.h>
//#include <stdlib.h>
//#include <time.h>
using namespace std;

/***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/
//#define N 128
#define C 384
#define H 15
#define W 15
#define R 3
#define S 3
#define M 384
#define E 13
#define F 13
#define U 1

__global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt, int num_img, int num_ch)
{
  int row = threadIdx.y;
  int col = threadIdx.x;
  if((row<height) && (col<width))
  {
    for (int i=0; i<wt_width; i++){
      for (int j=0; j<wt_width; j++){
        for(int k=0; k<num_ch; k++){
          d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] += d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(row)+i)*ip_height+(stride*(col)+j)]*d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)];
        }
      }
    }
    if(d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col]<0)
      d_o[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = 0;
  }
}

void element_wise_mmul(float* output, float* input, float* weight, int batch_size)
{
  int x,y,i,j,m,n,k;
  for(n=0; n<batch_size; n++){
    for (m=0 ; m<M; m++){
      for (x=0; x<F; x++){
        for(y=0; y<E; y++){
          // OP[x][y] = 0; // adding bias to output
          for (i=0; i<R; i++){
            for (j=0; j<S; j++){
              for(k=0; k<C; k++){
                float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)];
                float wt = weight[m*C*R*S+k*R*S+i*S+j];
                float prod = ip*wt;
                if(prod >=0) output[n*E*F*M+m*E*F+x*E+y] += prod;
                //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j];
              }
            }
          }
        }
      }
    }
  }
}

int main(int argc, char* argv[])
{
  int batch_size = atoi(argv[1]);

  /*************INITALIZING MATRICES*********************************/
  float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W];
  float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E];
  float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float));
  float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S];
  float* d_o;
  float* d_i;
  float* d_w;
  //clock_t cpu_start, gpu_start, cpu_end, gpu_end;
  //int a,b,c,d;
  int c,d,m,n,k;

  /*INITIALIZING WEIGHT MATRIX*/
  for (m=0; m<M; m++){
    for(k=0;k<C;k++){
      for (c=0; c<R; c++){
        for(d=0; d<S; d++){
          //WT[c][d] = 2.0;
          WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  /*INITIALIZING OUTPUT MATRIX*/
  for (n=0; n<batch_size;n++){
    for (m=0; m<M; m++){
      for (c=0; c<F; c++){
        for(d=0; d<E; d++){
          //OP[c][d] = 0;
          OP[n*M*F*E+m*F*E+c*E+d] = 0;
        }
      }
    }
  }

  /*INITIALIZING INPUT MATRIX*/
  for (n=0; n<batch_size; n++){
    for(k=0;k<C;k++){
      for (c=0; c<H; c++){
        for(d=0; d<W; d++){
          // IP[c][d] = (a+b+c+d);
          if ((c==0) || (d==0) || (c==14) || (d==14))
            IP[n*C*H*W+k*H*W+c*W+d] = 0;
          else
            IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0);
        }
      }
    }
  }

  cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float));
  cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float));
  cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float));

  //cpu_start = clock();
  clock_t start, end;
  start = clock();
  //element_wise_mmul(OP, IP, WT, batch_size);
  end = clock();
  //printf("cpu time is %f secs \n", (float)(end-start)/CLOCKS_PER_SEC);

  dim3 dimGrid(batch_size,384,1);
  dim3 dimBlock(13,13,1);
  //gpu_start = clock();
  ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,13,13,1,15,3,384,batch_size,384);
  //gpu_end = clock();
  cudaMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost);

  int g,h,s,u;
  float max_error = 0;
  string filename = "layer_4_"+to_string(batch_size);
  ifstream fin(filename.c_str());
  string line ;
  for (u=0;u<batch_size;u++){
    for (s=0;s<M;s++){
      for (g=0; g<F; g++){
        for(h=0; h<E; h++){
          getline(fin,line);
          float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str()));
          // float error =abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]);
          if (error > max_error) max_error = error;
          // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
          // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
          // printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h);
        }
      }
    }
  }
  fin.close();
  printf("max error = %f\n", max_error);
  //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl;

  cudaFree(d_o);
  cudaFree(d_i);
  cudaFree(d_w);
  free(OPG);
  free(IP);
  free(WT);
  free(OP);
  return 0;
}
d328c599fbc698c0f1b490ea4b65e31d813fed93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" namespace cv { namespace gpu { namespace device { namespace fast { __device__ unsigned int g_counter = 0; /////////////////////////////////////////////////////////////////////////// // calcKeypoints __constant__ uchar c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; // 1 -> v > x + th // 2 -> v < x - th // 0 -> x - th <= v <= x + th __device__ __forceinline__ int diffType(const int v, const int x, const int th) { const int diff = x - v; return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1); } __device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2) { mask1 = 0; mask2 = 0; int d1, d2; d1 = diffType(v, C[0] & 0xff, th); d2 = diffType(v, C[2] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 0; mask2 |= ((d1 & 2) >> 1) << 0; mask1 |= (d2 & 1) << 8; mask2 |= ((d2 & 2) >> 1) << 8; d1 = diffType(v, C[1] & 0xff, th); d2 = diffType(v, C[3] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 4; mask2 |= ((d1 & 2) >> 1) << 4; mask1 |= (d2 & 1) << 12; mask2 |= ((d2 & 2) >> 1) << 12; d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 2; mask2 |= ((d1 & 2) >> 1) << 2; mask1 |= (d2 & 1) << 10; mask2 |= ((d2 & 2) >> 1) << 10; d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> 
(2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 6; mask2 |= ((d1 & 2) >> 1) << 6; mask1 |= (d2 & 1) << 14; mask2 |= ((d2 & 2) >> 1) << 14; d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 1; mask2 |= ((d1 & 2) >> 1) << 1; mask1 |= (d2 & 1) << 9; mask2 |= ((d2 & 2) >> 1) << 9; d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 3; mask2 |= ((d1 & 2) >> 1) << 3; mask1 |= (d2 & 1) << 11; mask2 |= ((d2 & 2) >> 1) << 11; d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 5; mask2 |= ((d1 & 2) >> 1) << 5; mask1 |= (d2 & 1) << 13; mask2 |= ((d2 & 2) >> 1) << 13; d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th); mask1 |= (d1 & 1) << 7; mask2 |= ((d1 & 2) >> 1) << 7; mask1 |= (d2 & 1) << 15; mask2 |= ((d2 & 2) >> 1) << 15; } // 1 -> v > x + th // 2 -> v < x - th // 0 -> not a keypoint __device__ __forceinline__ bool isKeyPoint(int mask1, int mask2) { return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) || (__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7)))); } __device__ int cornerScore(const uint C[4], const int v, const int threshold) { // binary search in [threshold + 1, 255] int min = threshold + 1; int max = 255; while (min <= max) { const int mid = (min + max) >> 1; int mask1 = 0; int mask2 = 0; calcMask(C, v, mid, mask1, mask2); int isKp = static_cast<int>(isKeyPoint(mask1, mask2)); min = isKp * (mid + 1) + (isKp ^ 1) * min; max = (isKp ^ 1) * (mid - 1) + isKp * max; } return min - 1; } template <bool calcScore, class Mask> __global__ void calcKeypoints(const PtrStepSzb img, const Mask mask, short2* kpLoc, const unsigned int maxKeypoints, PtrStepi score, const int threshold) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110) const int j = threadIdx.x + blockIdx.x * blockDim.x + 3; const int i = threadIdx.y + blockIdx.y * blockDim.y + 3; if (i < img.rows - 3 && j < img.cols - 3 && mask(i, j)) { int v; uint C[4] = {0,0,0,0}; C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8; C[2] |= static_cast<uint>(img(i - 3, j)); C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8); C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8); C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8); C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8); C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8; C[3] |= static_cast<uint>(img(i, j - 3)); v = static_cast<int>(img(i, j)); C[1] |= static_cast<uint>(img(i, j + 3)); int d1 = diffType(v, C[1] & 0xff, threshold); int d2 = diffType(v, C[3] & 0xff, threshold); if ((d1 | d2) == 0) return; C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8; C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8); C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8); C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8); C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8); C[0] |= static_cast<uint>(img(i + 3, j)); C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8; int mask1 = 0; int mask2 = 0; calcMask(C, v, threshold, mask1, mask2); if (isKeyPoint(mask1, mask2)) { if (calcScore) score(i, j) = cornerScore(C, v, threshold); const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1)); if (ind < maxKeypoints) kpLoc[ind] = make_short2(j, i); } } #endif } int 
calcKeypoints_gpu(PtrStepSzb img, PtrStepSzb mask, short2* kpLoc, int maxKeypoints, PtrStepSzi score, int threshold) { void* counter_ptr; cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) ); dim3 block(32, 8); dim3 grid; grid.x = divUp(img.cols - 6, block.x); grid.y = divUp(img.rows - 6, block.y); cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(unsigned int)) ); if (score.data) { if (mask.data) hipLaunchKernelGGL(( calcKeypoints<true>), dim3(grid), dim3(block), 0, 0, img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold); else hipLaunchKernelGGL(( calcKeypoints<true>), dim3(grid), dim3(block), 0, 0, img, WithOutMask(), kpLoc, maxKeypoints, score, threshold); } else { if (mask.data) hipLaunchKernelGGL(( calcKeypoints<false>), dim3(grid), dim3(block), 0, 0, img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold); else hipLaunchKernelGGL(( calcKeypoints<false>), dim3(grid), dim3(block), 0, 0, img, WithOutMask(), kpLoc, maxKeypoints, score, threshold); } cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); unsigned int count; cudaSafeCall( hipMemcpy(&count, counter_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost) ); return count; } /////////////////////////////////////////////////////////////////////////// // nonmaxSupression __global__ void nonmaxSupression(const short2* kpLoc, int count, const PtrStepSzi scoreMat, short2* locFinal, float* responseFinal) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110) const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kpIdx < count) { short2 loc = kpLoc[kpIdx]; int score = scoreMat(loc.y, loc.x); bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x ) && score > scoreMat(loc.y - 1, loc.x + 1) && score > scoreMat(loc.y , loc.x - 1) && score > scoreMat(loc.y , loc.x + 1) && score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x ) && score > scoreMat(loc.y + 1, loc.x + 1); if (ismax) { const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1)); locFinal[ind] = loc; responseFinal[ind] = static_cast<float>(score); } } #endif } int nonmaxSupression_gpu(const short2* kpLoc, int count, PtrStepSzi score, short2* loc, float* response) { void* counter_ptr; cudaSafeCall( hipGetSymbolAddress(&counter_ptr, g_counter) ); dim3 block(256); dim3 grid; grid.x = divUp(count, block.x); cudaSafeCall( hipMemset(counter_ptr, 0, sizeof(unsigned int)) ); hipLaunchKernelGGL(( nonmaxSupression), dim3(grid), dim3(block), 0, 0, kpLoc, count, score, loc, response); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); unsigned int new_count; cudaSafeCall( hipMemcpy(&new_count, counter_ptr, sizeof(unsigned int), hipMemcpyDeviceToHost) ); return new_count; } } // namespace fast }}} #endif /* CUDA_DISABLER */
d328c599fbc698c0f1b490ea4b65e31d813fed93.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // // Copyright (c) 2010, Paul Furgale, Chi Hay Tong // // The original code was written by Paul Furgale and Chi Hay Tong // and later optimized and prepared for integration into OpenCV by Itseez. 
// //M*/ #if !defined CUDA_DISABLER #include "opencv2/gpu/device/common.hpp" #include "opencv2/gpu/device/utility.hpp" namespace cv { namespace gpu { namespace device { namespace fast { __device__ unsigned int g_counter = 0; /////////////////////////////////////////////////////////////////////////// // calcKeypoints __constant__ uchar c_table[] = { 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 
0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xc0, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0x80, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0x80, 0x0, 0xc0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; // 1 -> v > x + th // 2 -> v < x - th // 0 -> x - th <= v <= x + th __device__ __forceinline__ int diffType(const int v, const int x, const int th) { const int diff = x - v; return static_cast<int>(diff < -th) + (static_cast<int>(diff > th) << 1); } __device__ void calcMask(const uint C[4], const int v, const int th, int& mask1, int& mask2) { mask1 = 0; mask2 = 0; int d1, d2; d1 = diffType(v, C[0] & 0xff, th); d2 = diffType(v, C[2] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 0; mask2 |= ((d1 & 2) >> 1) << 0; mask1 |= (d2 & 1) << 8; mask2 |= ((d2 & 2) >> 1) << 8; d1 = diffType(v, C[1] & 0xff, th); d2 = diffType(v, C[3] & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 4; mask2 |= ((d1 & 2) >> 1) << 4; mask1 |= (d2 & 1) << 12; mask2 |= ((d2 & 2) >> 1) << 12; d1 = diffType(v, (C[0] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 2; mask2 |= ((d1 & 2) >> 1) << 2; mask1 |= (d2 & 1) << 10; mask2 |= ((d2 & 2) >> 1) << 10; d1 = diffType(v, (C[1] >> (2 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> 
(2 * 8)) & 0xff, th); if ((d1 | d2) == 0) return; mask1 |= (d1 & 1) << 6; mask2 |= ((d1 & 2) >> 1) << 6; mask1 |= (d2 & 1) << 14; mask2 |= ((d2 & 2) >> 1) << 14; d1 = diffType(v, (C[0] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 1; mask2 |= ((d1 & 2) >> 1) << 1; mask1 |= (d2 & 1) << 9; mask2 |= ((d2 & 2) >> 1) << 9; d1 = diffType(v, (C[0] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[2] >> (3 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 3; mask2 |= ((d1 & 2) >> 1) << 3; mask1 |= (d2 & 1) << 11; mask2 |= ((d2 & 2) >> 1) << 11; d1 = diffType(v, (C[1] >> (1 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (1 * 8)) & 0xff, th); /*if ((d1 | d2) == 0) return;*/ mask1 |= (d1 & 1) << 5; mask2 |= ((d1 & 2) >> 1) << 5; mask1 |= (d2 & 1) << 13; mask2 |= ((d2 & 2) >> 1) << 13; d1 = diffType(v, (C[1] >> (3 * 8)) & 0xff, th); d2 = diffType(v, (C[3] >> (3 * 8)) & 0xff, th); mask1 |= (d1 & 1) << 7; mask2 |= ((d1 & 2) >> 1) << 7; mask1 |= (d2 & 1) << 15; mask2 |= ((d2 & 2) >> 1) << 15; } // 1 -> v > x + th // 2 -> v < x - th // 0 -> not a keypoint __device__ __forceinline__ bool isKeyPoint(int mask1, int mask2) { return (__popc(mask1) > 8 && (c_table[(mask1 >> 3) - 63] & (1 << (mask1 & 7)))) || (__popc(mask2) > 8 && (c_table[(mask2 >> 3) - 63] & (1 << (mask2 & 7)))); } __device__ int cornerScore(const uint C[4], const int v, const int threshold) { // binary search in [threshold + 1, 255] int min = threshold + 1; int max = 255; while (min <= max) { const int mid = (min + max) >> 1; int mask1 = 0; int mask2 = 0; calcMask(C, v, mid, mask1, mask2); int isKp = static_cast<int>(isKeyPoint(mask1, mask2)); min = isKp * (mid + 1) + (isKp ^ 1) * min; max = (isKp ^ 1) * (mid - 1) + isKp * max; } return min - 1; } template <bool calcScore, class Mask> __global__ void calcKeypoints(const PtrStepSzb img, const Mask mask, short2* kpLoc, const unsigned int maxKeypoints, PtrStepi score, const int threshold) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110) const int j = threadIdx.x + blockIdx.x * blockDim.x + 3; const int i = threadIdx.y + blockIdx.y * blockDim.y + 3; if (i < img.rows - 3 && j < img.cols - 3 && mask(i, j)) { int v; uint C[4] = {0,0,0,0}; C[2] |= static_cast<uint>(img(i - 3, j - 1)) << 8; C[2] |= static_cast<uint>(img(i - 3, j)); C[1] |= static_cast<uint>(img(i - 3, j + 1)) << (3 * 8); C[2] |= static_cast<uint>(img(i - 2, j - 2)) << (2 * 8); C[1] |= static_cast<uint>(img(i - 2, j + 2)) << (2 * 8); C[2] |= static_cast<uint>(img(i - 1, j - 3)) << (3 * 8); C[1] |= static_cast<uint>(img(i - 1, j + 3)) << 8; C[3] |= static_cast<uint>(img(i, j - 3)); v = static_cast<int>(img(i, j)); C[1] |= static_cast<uint>(img(i, j + 3)); int d1 = diffType(v, C[1] & 0xff, threshold); int d2 = diffType(v, C[3] & 0xff, threshold); if ((d1 | d2) == 0) return; C[3] |= static_cast<uint>(img(i + 1, j - 3)) << 8; C[0] |= static_cast<uint>(img(i + 1, j + 3)) << (3 * 8); C[3] |= static_cast<uint>(img(i + 2, j - 2)) << (2 * 8); C[0] |= static_cast<uint>(img(i + 2, j + 2)) << (2 * 8); C[3] |= static_cast<uint>(img(i + 3, j - 1)) << (3 * 8); C[0] |= static_cast<uint>(img(i + 3, j)); C[0] |= static_cast<uint>(img(i + 3, j + 1)) << 8; int mask1 = 0; int mask2 = 0; calcMask(C, v, threshold, mask1, mask2); if (isKeyPoint(mask1, mask2)) { if (calcScore) score(i, j) = cornerScore(C, v, threshold); const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1)); if (ind < maxKeypoints) kpLoc[ind] = make_short2(j, i); } } #endif } int 
calcKeypoints_gpu(PtrStepSzb img, PtrStepSzb mask, short2* kpLoc, int maxKeypoints, PtrStepSzi score, int threshold) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) ); dim3 block(32, 8); dim3 grid; grid.x = divUp(img.cols - 6, block.x); grid.y = divUp(img.rows - 6, block.y); cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) ); if (score.data) { if (mask.data) calcKeypoints<true><<<grid, block>>>(img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold); else calcKeypoints<true><<<grid, block>>>(img, WithOutMask(), kpLoc, maxKeypoints, score, threshold); } else { if (mask.data) calcKeypoints<false><<<grid, block>>>(img, SingleMask(mask), kpLoc, maxKeypoints, score, threshold); else calcKeypoints<false><<<grid, block>>>(img, WithOutMask(), kpLoc, maxKeypoints, score, threshold); } cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); unsigned int count; cudaSafeCall( cudaMemcpy(&count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) ); return count; } /////////////////////////////////////////////////////////////////////////// // nonmaxSupression __global__ void nonmaxSupression(const short2* kpLoc, int count, const PtrStepSzi scoreMat, short2* locFinal, float* responseFinal) { #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 110) const int kpIdx = threadIdx.x + blockIdx.x * blockDim.x; if (kpIdx < count) { short2 loc = kpLoc[kpIdx]; int score = scoreMat(loc.y, loc.x); bool ismax = score > scoreMat(loc.y - 1, loc.x - 1) && score > scoreMat(loc.y - 1, loc.x ) && score > scoreMat(loc.y - 1, loc.x + 1) && score > scoreMat(loc.y , loc.x - 1) && score > scoreMat(loc.y , loc.x + 1) && score > scoreMat(loc.y + 1, loc.x - 1) && score > scoreMat(loc.y + 1, loc.x ) && score > scoreMat(loc.y + 1, loc.x + 1); if (ismax) { const unsigned int ind = atomicInc(&g_counter, (unsigned int)(-1)); locFinal[ind] = loc; responseFinal[ind] = static_cast<float>(score); } } #endif } int nonmaxSupression_gpu(const short2* kpLoc, int count, PtrStepSzi score, short2* loc, float* response) { void* counter_ptr; cudaSafeCall( cudaGetSymbolAddress(&counter_ptr, g_counter) ); dim3 block(256); dim3 grid; grid.x = divUp(count, block.x); cudaSafeCall( cudaMemset(counter_ptr, 0, sizeof(unsigned int)) ); nonmaxSupression<<<grid, block>>>(kpLoc, count, score, loc, response); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); unsigned int new_count; cudaSafeCall( cudaMemcpy(&new_count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost) ); return new_count; } } // namespace fast }}} #endif /* CUDA_DISABLER */
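// --- Editor's illustration (not part of the .cu file above) --------------------------------
// Self-contained sketch of the compaction pattern used by calcKeypoints/nonmaxSupression:
// every thread that finds a hit reserves an output slot with atomicInc on a __device__
// counter, the counter is zeroed through cudaGetSymbolAddress + cudaMemset before the launch,
// and the host copies it back afterwards to learn how many slots were written. The kernel and
// buffer names here are hypothetical; only the pattern matches the file.
#include <cuda_runtime.h>
#include <cstdio>

__device__ unsigned int g_hits = 0;

__global__ void collectEven(const int* in, int n, int* out, unsigned int maxOut)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && (in[i] % 2) == 0)
    {
        const unsigned int slot = atomicInc(&g_hits, (unsigned int)(-1)); // wraps only at UINT_MAX
        if (slot < maxOut)                                                // clamp like maxKeypoints
            out[slot] = in[i];
    }
}

int main()
{
    const int n = 1024;
    int h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = i;

    int *d_in = 0, *d_out = 0;
    cudaMalloc(&d_in,  n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

    void* counter_ptr = 0;
    cudaGetSymbolAddress(&counter_ptr, g_hits);
    cudaMemset(counter_ptr, 0, sizeof(unsigned int));   // reset before every launch

    collectEven<<<(n + 255) / 256, 256>>>(d_in, n, d_out, n);
    cudaDeviceSynchronize();

    unsigned int count = 0;
    cudaMemcpy(&count, counter_ptr, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("kept %u of %d elements\n", count, n);        // expected: 512

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
// -------------------------------------------------------------------------------------------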
e8003fb90f271e22bb0c53c272fa6f8bcd0c41fd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <hip/hip_runtime_api.h> #include <tuple> #include <iostream> #include <string.h> double time_h = 0; double time_d = 0; int sample_rounds = 10; // round double to int int my_round(double d) { int y = (int)(d + 0.5+(d<0)); return y; } void meanFilter_h(unsigned char* raw_image_matrix,unsigned char* filtered_image_data,int image_width, int image_height, int window_size) { // int size = 3 * image_width * image_height; int half_window = (window_size-window_size % 2)/2; for(int i = 0; i < image_height; i += 1){ for(int j = 0; j < image_width; j += 1){ int k = 3*(i*image_height+j); int top, bottom, left, right; if(i-half_window >= 0){top = i-half_window;}else{top = 0;}// top limit if(i+half_window <= image_height-1){bottom = i+half_window;}else{bottom = image_height-1;}// bottom limit if(j-half_window >= 0){left = j-half_window;}else{left = 0;}// left limit if(j+half_window <= image_width-1){right = j+half_window;}else{right = image_width-1;}// right limit double first_byte = 0; double second_byte = 0; double third_byte = 0; // move inside the window for(int x = top; x <= bottom; x++){ for(int y = left; y <= right; y++){ int pos = 3*(x*image_height + y); // three bytes first_byte += raw_image_matrix[pos]; second_byte += raw_image_matrix[pos+1]; third_byte += raw_image_matrix[pos+2]; } } int effective_window_size = (bottom-top+1)*(right-left+1); filtered_image_data[k] = first_byte/effective_window_size; filtered_image_data[k+1] = second_byte/effective_window_size; filtered_image_data[k+2] =third_byte/effective_window_size; } } // printf("Result from CPU\n"); // for(int z = 0; z < size; z += 3) // { // printf("(%d, %d, %d)\n",filtered_image_data[z], filtered_image_data[z+1], filtered_image_data[z+2]); // } } __global__ void meanFilter_d(unsigned char* raw_image_matrix, unsigned char* filtered_image_data, int image_width, int image_height, int half_window) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < image_height && j < image_width){ int k = 3*(i*image_height+j); int top, bottom, left, right; if(i-half_window >= 0){top = i-half_window;}else{top = 0;}// top limit if(i+half_window <= image_height-1){bottom = i+half_window;}else{bottom = image_height-1;}// bottom limit if(j-half_window >= 0){left = j-half_window;}else{left = 0;}// left limit if(j+half_window <= image_width-1){right = j+half_window;}else{right = image_width-1;}// right limit double first_byte = 0; double second_byte = 0; double third_byte = 0; // move inside the window for(int x = top; x <= bottom; x++){ for(int y = left; y <= right; y++){ int pos = 3*(x*image_height + y); // three bytes first_byte += raw_image_matrix[pos]; second_byte += raw_image_matrix[pos+1]; third_byte += raw_image_matrix[pos+2]; } } int effective_window_size = (bottom-top+1)*(right-left+1); filtered_image_data[k] = first_byte/effective_window_size; filtered_image_data[k+1] = second_byte/effective_window_size; filtered_image_data[k+2] =third_byte/effective_window_size; } } int main(int argc,char **argv) { printf("Read the image file...\n"); //reading the bitmap FILE* f = fopen(argv[1], "rb"); unsigned char info[54]; fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract the height and width of the image from header int width, height; memcpy(&width, info + 18, sizeof(int)); memcpy(&height, info + 22, sizeof(int)); int window_size = 
strtol(argv[2],NULL,10); printf("The Window size: %d\n",window_size); printf("Dimensions of Image: (%d, %d)\n",width,height); int size = 3 * width * abs(height); printf("convert the bitmap to char array...\n"); unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel unsigned char* result_image_data_d; unsigned char* result_image_data_h = new unsigned char[size]; unsigned char* result_image_data_h1 = new unsigned char[size]; unsigned char* image_data_d; fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); //convert the bitmap to char array // dim3 dimGrid (GRID_SIZE, GRID_SIZE); // dim3 dimBlock (BLOCK_SIZE, BLOCK_SIZE); int block_size = 32; int grid_size = width/block_size; dim3 dimBlock(block_size, block_size, 1); dim3 dimGrid(grid_size, grid_size, 1); printf("GRID_SIZE: (%d, %d)\n", grid_size, grid_size); printf("BLOCK_SIZE: (%d, %d)\n", block_size, block_size); for(int _ = 0; _ < sample_rounds; _ += 1) { printf("Allocatating device memory on host.....\n"); hipMalloc((void **)&image_data_d,size*sizeof(unsigned char)); hipMalloc((void **)&result_image_data_d,size*sizeof(unsigned char)); printf("Copying to device..\n"); hipMemcpy(image_data_d,data,size*sizeof(unsigned char),hipMemcpyHostToDevice); int half_window = (window_size-window_size % 2)/2; // call to GPU code clock_t start_d=clock(); printf("Doing GPU Mean Filter...\n"); hipLaunchKernelGGL(( meanFilter_d) , dim3(dimGrid), dim3(dimBlock) , 0, 0, image_data_d, result_image_data_d, width, height, half_window); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s\n", hipGetErrorString(error) ); exit(-1); } clock_t end_d = clock(); // call to CPU code clock_t start_h = clock(); printf("Doing CPU Mean Filter...\n"); meanFilter_h(data, result_image_data_h1, width, height, window_size); clock_t end_h = clock(); printf("Result from GPU\n"); hipMemcpy(result_image_data_h,result_image_data_d,size*sizeof(unsigned char),hipMemcpyDeviceToHost); printf("compare results code : %d\n",memcmp(result_image_data_h, result_image_data_h1, size*sizeof(unsigned char))); time_h += (double)(end_h-start_h)/CLOCKS_PER_SEC; time_d += (double)(end_d-start_d)/CLOCKS_PER_SEC; hipFree(image_data_d); hipFree(result_image_data_d); } // hipMemcpy(data_h,image_data_d,size*sizeof(unsigned char),hipMemcpyDeviceToHost); // for(int i = 0; i < size; i += 3) // { // printf("(%d, %d, %d)\n",result_image_data_h[i], result_image_data_h[i+1], result_image_data_h[i+2]); // } printf(" GPU Time: %f\n",(time_d/sample_rounds)); printf(" CPU Time: %f\n",(time_h/sample_rounds)); printf("CPU/GPU time: %f\n",(time_h/time_d)); return 0; }
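// --- Editor's illustration (not part of the .hip file above) -------------------------------
// The file computes grid_size = width/block_size and reuses it for both grid dimensions, so
// the integer division drops any remainder and the y extent ignores the image height; the
// kernel's bounds check then silently skips any uncovered pixels. A sketch of the usual
// ceil-division sizing, reusing the file's width, height, block_size and device pointers
// (the *Full names are hypothetical), would be:

dim3 dimBlockFull(block_size, block_size, 1);
dim3 dimGridFull((width  + block_size - 1) / block_size,
                 (height + block_size - 1) / block_size, 1);
hipLaunchKernelGGL(meanFilter_d, dimGridFull, dimBlockFull, 0, 0,
                   image_data_d, result_image_data_d, width, height, half_window);

// A further observation, left unchanged above: both meanFilter_h and meanFilter_d index the
// RGB buffer as 3*(i*image_height + j); for a row-major image this equals the conventional
// 3*(i*image_width + j) only when the image is square, which is worth keeping in mind when
// comparing the CPU and GPU results on non-square inputs.
// -------------------------------------------------------------------------------------------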
e8003fb90f271e22bb0c53c272fa6f8bcd0c41fd.cu
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <cuda_profiler_api.h> #include <tuple> #include <iostream> #include <string.h> double time_h = 0; double time_d = 0; int sample_rounds = 10; // round double to int int my_round(double d) { int y = (int)(d + 0.5+(d<0)); return y; } void meanFilter_h(unsigned char* raw_image_matrix,unsigned char* filtered_image_data,int image_width, int image_height, int window_size) { // int size = 3 * image_width * image_height; int half_window = (window_size-window_size % 2)/2; for(int i = 0; i < image_height; i += 1){ for(int j = 0; j < image_width; j += 1){ int k = 3*(i*image_height+j); int top, bottom, left, right; if(i-half_window >= 0){top = i-half_window;}else{top = 0;}// top limit if(i+half_window <= image_height-1){bottom = i+half_window;}else{bottom = image_height-1;}// bottom limit if(j-half_window >= 0){left = j-half_window;}else{left = 0;}// left limit if(j+half_window <= image_width-1){right = j+half_window;}else{right = image_width-1;}// right limit double first_byte = 0; double second_byte = 0; double third_byte = 0; // move inside the window for(int x = top; x <= bottom; x++){ for(int y = left; y <= right; y++){ int pos = 3*(x*image_height + y); // three bytes first_byte += raw_image_matrix[pos]; second_byte += raw_image_matrix[pos+1]; third_byte += raw_image_matrix[pos+2]; } } int effective_window_size = (bottom-top+1)*(right-left+1); filtered_image_data[k] = first_byte/effective_window_size; filtered_image_data[k+1] = second_byte/effective_window_size; filtered_image_data[k+2] =third_byte/effective_window_size; } } // printf("Result from CPU\n"); // for(int z = 0; z < size; z += 3) // { // printf("(%d, %d, %d)\n",filtered_image_data[z], filtered_image_data[z+1], filtered_image_data[z+2]); // } } __global__ void meanFilter_d(unsigned char* raw_image_matrix, unsigned char* filtered_image_data, int image_width, int image_height, int half_window) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if (i < image_height && j < image_width){ int k = 3*(i*image_height+j); int top, bottom, left, right; if(i-half_window >= 0){top = i-half_window;}else{top = 0;}// top limit if(i+half_window <= image_height-1){bottom = i+half_window;}else{bottom = image_height-1;}// bottom limit if(j-half_window >= 0){left = j-half_window;}else{left = 0;}// left limit if(j+half_window <= image_width-1){right = j+half_window;}else{right = image_width-1;}// right limit double first_byte = 0; double second_byte = 0; double third_byte = 0; // move inside the window for(int x = top; x <= bottom; x++){ for(int y = left; y <= right; y++){ int pos = 3*(x*image_height + y); // three bytes first_byte += raw_image_matrix[pos]; second_byte += raw_image_matrix[pos+1]; third_byte += raw_image_matrix[pos+2]; } } int effective_window_size = (bottom-top+1)*(right-left+1); filtered_image_data[k] = first_byte/effective_window_size; filtered_image_data[k+1] = second_byte/effective_window_size; filtered_image_data[k+2] =third_byte/effective_window_size; } } int main(int argc,char **argv) { printf("Read the image file...\n"); //reading the bitmap FILE* f = fopen(argv[1], "rb"); unsigned char info[54]; fread(info, sizeof(unsigned char), 54, f); // read the 54-byte header // extract the height and width of the image from header int width, height; memcpy(&width, info + 18, sizeof(int)); memcpy(&height, info + 22, sizeof(int)); int window_size = strtol(argv[2],NULL,10); printf("The Window size: %d\n",window_size); printf("Dimensions 
of Image: (%d, %d)\n",width,height); int size = 3 * width * abs(height); printf("convert the bitmap to char array...\n"); unsigned char* data = new unsigned char[size]; // allocate 3 bytes per pixel unsigned char* result_image_data_d; unsigned char* result_image_data_h = new unsigned char[size]; unsigned char* result_image_data_h1 = new unsigned char[size]; unsigned char* image_data_d; fread(data, sizeof(unsigned char), size, f); // read the rest of the data at once fclose(f); //convert the bitmap to char array // dim3 dimGrid (GRID_SIZE, GRID_SIZE); // dim3 dimBlock (BLOCK_SIZE, BLOCK_SIZE); int block_size = 32; int grid_size = width/block_size; dim3 dimBlock(block_size, block_size, 1); dim3 dimGrid(grid_size, grid_size, 1); printf("GRID_SIZE: (%d, %d)\n", grid_size, grid_size); printf("BLOCK_SIZE: (%d, %d)\n", block_size, block_size); for(int _ = 0; _ < sample_rounds; _ += 1) { printf("Allocatating device memory on host.....\n"); cudaMalloc((void **)&image_data_d,size*sizeof(unsigned char)); cudaMalloc((void **)&result_image_data_d,size*sizeof(unsigned char)); printf("Copying to device..\n"); cudaMemcpy(image_data_d,data,size*sizeof(unsigned char),cudaMemcpyHostToDevice); int half_window = (window_size-window_size % 2)/2; // call to GPU code clock_t start_d=clock(); printf("Doing GPU Mean Filter...\n"); meanFilter_d <<< dimGrid, dimBlock >>> (image_data_d, result_image_data_d, width, height, half_window); cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s\n", cudaGetErrorString(error) ); exit(-1); } clock_t end_d = clock(); // call to CPU code clock_t start_h = clock(); printf("Doing CPU Mean Filter...\n"); meanFilter_h(data, result_image_data_h1, width, height, window_size); clock_t end_h = clock(); printf("Result from GPU\n"); cudaMemcpy(result_image_data_h,result_image_data_d,size*sizeof(unsigned char),cudaMemcpyDeviceToHost); printf("compare results code : %d\n",memcmp(result_image_data_h, result_image_data_h1, size*sizeof(unsigned char))); time_h += (double)(end_h-start_h)/CLOCKS_PER_SEC; time_d += (double)(end_d-start_d)/CLOCKS_PER_SEC; cudaFree(image_data_d); cudaFree(result_image_data_d); } // cudaMemcpy(data_h,image_data_d,size*sizeof(unsigned char),cudaMemcpyDeviceToHost); // for(int i = 0; i < size; i += 3) // { // printf("(%d, %d, %d)\n",result_image_data_h[i], result_image_data_h[i+1], result_image_data_h[i+2]); // } printf(" GPU Time: %f\n",(time_d/sample_rounds)); printf(" CPU Time: %f\n",(time_h/sample_rounds)); printf("CPU/GPU time: %f\n",(time_h/time_d)); return 0; }
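// Editorial sketch (not part of either mean-filter file above), written as it would sit inside
// the main() shown there. Both versions size the grid as width/block_size, which truncates when
// width or height is not a multiple of 32 and can leave the right/bottom image edges
// unprocessed; a ceiling-division 2-D grid covers the whole image. d_in and d_out are assumed
// device buffers of 3*width*height bytes.
dim3 block(32, 32, 1);
dim3 grid((width  + block.x - 1) / block.x,
          (height + block.y - 1) / block.y, 1);
meanFilter_d<<<grid, block>>>(d_in, d_out, width, height, half_window);   // CUDA launch form
// hipify expresses the same launch as:
//   hipLaunchKernelGGL(meanFilter_d, grid, block, 0, 0, d_in, d_out, width, height, half_window);
// Note: cudaThreadSynchronize() in the .cu file is deprecated; cudaDeviceSynchronize()
// (hipDeviceSynchronize() in the hipified file) is the current API.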
46911043ab2c452114b572d995693ab50c3c2100.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Each thread computes one contiguous slice of C = A * B for N x N row-major matrices:
// thread t handles output elements [t*N*N/threads_num, (t+1)*N*N/threads_num).
__global__ void multiply( float *A2, float *B2, float *C, int N, int threads_num ){
  // Plain local aliases: the original `__shared__ float*` declarations only held these
  // global-memory addresses and were written by every thread with no synchronization.
  float *A = A2;
  float *B = B2;
  float tmp;
  int k, pos;
  int a = N * N * (blockDim.x * blockIdx.x + threadIdx.x) / threads_num, b;
  if ( blockDim.x * blockIdx.x + threadIdx.x == threads_num - 1) b = N * N;
  else b = N * N * ( blockDim.x * blockIdx.x + threadIdx.x + 1) / threads_num;
  for( pos = a; pos < b; pos++ ){
    tmp = 0;
    for( k = 0; k < N; k++ )
      tmp += A[ N * (pos / N) + k ] * B[ k * N + pos - (pos / N) * N ];
    C[ pos ] = tmp;
  }
}
46911043ab2c452114b572d995693ab50c3c2100.cu
#include "includes.h" __global__ void multiply( float *A2, float *B2, float *C, int N, int threads_num ){ __shared__ float *A; __shared__ float *B; A = A2; B = B2; float tmp; int k, pos; int a = N * N * (blockDim.x * blockIdx.x + threadIdx.x) / threads_num, b; if ( blockDim.x * blockIdx.x + threadIdx.x == threads_num - 1) b = N * N; else b = N * N * ( blockDim.x * blockIdx.x + threadIdx.x + 1) / threads_num; for( pos = a; pos < b; pos++ ){ tmp = 0; for( k = 0; k < N; k++ ) tmp += A[ N * (pos / N ) + k ] * B[ k * N + pos - ( pos / N) * N]; C[ pos ] = tmp; } }
ca0e472617a839553ceb7f02018336362fbc62e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility default -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-DEFAULT %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility protected -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-PROTECTED %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility hidden -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-HIDDEN %s #include "Inputs/cuda.h" // CHECK-DEFAULT: @c = addrspace(4) externally_initialized global // CHECK-DEFAULT: @g = addrspace(1) externally_initialized global // CHECK-PROTECTED: @c = protected addrspace(4) externally_initialized global // CHECK-PROTECTED: @g = protected addrspace(1) externally_initialized global // CHECK-HIDDEN: @c = protected addrspace(4) externally_initialized global // CHECK-HIDDEN: @g = protected addrspace(1) externally_initialized global __constant__ int c; __device__ int g; // CHECK-DEFAULT: @e = external addrspace(1) global // CHECK-PROTECTED: @e = external protected addrspace(1) global // CHECK-HIDDEN: @e = external protected addrspace(1) global extern __device__ int e; // dummy one to hold reference to `e`. __device__ int f() { return e; } // CHECK-DEFAULT: define amdgpu_kernel void @_Z3foov() // CHECK-PROTECTED: define protected amdgpu_kernel void @_Z3foov() // CHECK-HIDDEN: define protected amdgpu_kernel void @_Z3foov() __global__ void foo() { g = c; }
ca0e472617a839553ceb7f02018336362fbc62e4.cu
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility default -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-DEFAULT %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility protected -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-PROTECTED %s // RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -fcuda-is-device -fapply-global-visibility-to-externs -fvisibility hidden -emit-llvm -o - %s | FileCheck --check-prefix=CHECK-HIDDEN %s #include "Inputs/cuda.h" // CHECK-DEFAULT: @c = addrspace(4) externally_initialized global // CHECK-DEFAULT: @g = addrspace(1) externally_initialized global // CHECK-PROTECTED: @c = protected addrspace(4) externally_initialized global // CHECK-PROTECTED: @g = protected addrspace(1) externally_initialized global // CHECK-HIDDEN: @c = protected addrspace(4) externally_initialized global // CHECK-HIDDEN: @g = protected addrspace(1) externally_initialized global __constant__ int c; __device__ int g; // CHECK-DEFAULT: @e = external addrspace(1) global // CHECK-PROTECTED: @e = external protected addrspace(1) global // CHECK-HIDDEN: @e = external protected addrspace(1) global extern __device__ int e; // dummy one to hold reference to `e`. __device__ int f() { return e; } // CHECK-DEFAULT: define amdgpu_kernel void @_Z3foov() // CHECK-PROTECTED: define protected amdgpu_kernel void @_Z3foov() // CHECK-HIDDEN: define protected amdgpu_kernel void @_Z3foov() __global__ void foo() { g = c; }
fded9d16e916d3df68609bd34759c7eba6d72a55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File nbody.cu * * Implementation of the N-Body problem * * Paraleln programovn na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xmarci10 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate velocity and new position for each particle * @param p_in - input particles * @param p_out - output particles * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_velocity(const t_particles p_in, t_particles p_out, int N, float dt) { int thread_id = blockDim.x * blockIdx.x + threadIdx.x; if (thread_id < N) { float r, dx, dy, dz; float posx, posy, posz; float velx, vely, velz; float weight; float F; /** * Ressetting the registers for partial results. * note: Using registers reduces the number of accesses to global memory. * Partial results are saved at the end of the calculation. */ float tmpvelx = 0.0f; float tmpvely = 0.0f; float tmpvelz = 0.0f; /** * Loading positions, velocities and weights from the global memory into the registers. * note: Pre-reading data from the global memory, reduces the number of * memory accesses and thus signigicantly speeds up the calculation. */ posx = p_in.pos_x[thread_id]; posy = p_in.pos_y[thread_id]; posz = p_in.pos_z[thread_id]; velx = p_in.vel_x[thread_id]; vely = p_in.vel_y[thread_id]; velz = p_in.vel_z[thread_id]; weight = p_in.weight[thread_id]; for (int j = 0; j < N; j++) { /** * Loading the weight of the second particle as it will be used multiple times. * note: It reduces the number of accesses to the global memory. */ float weight_j = p_in.weight[j]; /** * The calculation of the gravitational force is divided into several * several instructions in order to eliminate data dependencies, and thus * we have increased the ILP. */ F = -G * dt * weight_j; dx = posx - p_in.pos_x[j]; dy = posy - p_in.pos_y[j]; dz = posz - p_in.pos_z[j]; r = sqrt(dx*dx + dy*dy + dz*dz); // see previous comment F /= (r * r * r + FLT_MIN); // Add the velocity obtained by the gravitational action of the body 'j'. tmpvelx += (r > COLLISION_DISTANCE) ? F * dx : 0.0f; tmpvely += (r > COLLISION_DISTANCE) ? F * dy : 0.0f; tmpvelz += (r > COLLISION_DISTANCE) ? F * dz : 0.0f; if (r < COLLISION_DISTANCE) { /** * Reuseage of the registers of distances. * note: The values are calculated only once and then used several times, see below. */ dx = weight - weight_j; dy = 2 * weight_j; dz = weight + weight_j; // Add the velocity obtained by the collision with the body 'j'. tmpvelx += (r > 0.0f) ? ((dx * velx + dy * p_in.vel_x[j]) / dz) - velx : 0.0f; tmpvely += (r > 0.0f) ? ((dx * vely + dy * p_in.vel_y[j]) / dz) - vely : 0.0f; tmpvelz += (r > 0.0f) ? ((dx * velz + dy * p_in.vel_z[j]) / dz) - velz : 0.0f; } } /** * Update particle * note: Write to global memory only once at the end of the cycle. 
*/ velx += tmpvelx; p_out.vel_x[thread_id] = velx; p_out.pos_x[thread_id] = velx * dt + posx; vely += tmpvely; p_out.vel_y[thread_id] = vely; p_out.pos_y[thread_id] = vely * dt + posy; velz += tmpvelz; p_out.vel_z[thread_id] = velz; p_out.pos_z[thread_id] = velz * dt + posz; } }// end of calculate_velocity //----------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N) { }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc& memDesc) { float4 com = {0 ,0, 0, 0}; for(int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? ( memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
fded9d16e916d3df68609bd34759c7eba6d72a55.cu
/** * @File nbody.cu * * Implementation of the N-Body problem * * Paralelní programování na GPU (PCG 2020) * Projekt c. 1 (cuda) * Login: xmarci10 */ #include <cmath> #include <cfloat> #include "nbody.h" /** * CUDA kernel to calculate velocity and new position for each particle * @param p_in - input particles * @param p_out - output particles * @param N - Number of particles * @param dt - Size of the time step */ __global__ void calculate_velocity(const t_particles p_in, t_particles p_out, int N, float dt) { int thread_id = blockDim.x * blockIdx.x + threadIdx.x; if (thread_id < N) { float r, dx, dy, dz; float posx, posy, posz; float velx, vely, velz; float weight; float F; /** * Ressetting the registers for partial results. * note: Using registers reduces the number of accesses to global memory. * Partial results are saved at the end of the calculation. */ float tmpvelx = 0.0f; float tmpvely = 0.0f; float tmpvelz = 0.0f; /** * Loading positions, velocities and weights from the global memory into the registers. * note: Pre-reading data from the global memory, reduces the number of * memory accesses and thus signigicantly speeds up the calculation. */ posx = p_in.pos_x[thread_id]; posy = p_in.pos_y[thread_id]; posz = p_in.pos_z[thread_id]; velx = p_in.vel_x[thread_id]; vely = p_in.vel_y[thread_id]; velz = p_in.vel_z[thread_id]; weight = p_in.weight[thread_id]; for (int j = 0; j < N; j++) { /** * Loading the weight of the second particle as it will be used multiple times. * note: It reduces the number of accesses to the global memory. */ float weight_j = p_in.weight[j]; /** * The calculation of the gravitational force is divided into several * several instructions in order to eliminate data dependencies, and thus * we have increased the ILP. */ F = -G * dt * weight_j; dx = posx - p_in.pos_x[j]; dy = posy - p_in.pos_y[j]; dz = posz - p_in.pos_z[j]; r = sqrt(dx*dx + dy*dy + dz*dz); // see previous comment F /= (r * r * r + FLT_MIN); // Add the velocity obtained by the gravitational action of the body 'j'. tmpvelx += (r > COLLISION_DISTANCE) ? F * dx : 0.0f; tmpvely += (r > COLLISION_DISTANCE) ? F * dy : 0.0f; tmpvelz += (r > COLLISION_DISTANCE) ? F * dz : 0.0f; if (r < COLLISION_DISTANCE) { /** * Reuseage of the registers of distances. * note: The values are calculated only once and then used several times, see below. */ dx = weight - weight_j; dy = 2 * weight_j; dz = weight + weight_j; // Add the velocity obtained by the collision with the body 'j'. tmpvelx += (r > 0.0f) ? ((dx * velx + dy * p_in.vel_x[j]) / dz) - velx : 0.0f; tmpvely += (r > 0.0f) ? ((dx * vely + dy * p_in.vel_y[j]) / dz) - vely : 0.0f; tmpvelz += (r > 0.0f) ? ((dx * velz + dy * p_in.vel_z[j]) / dz) - velz : 0.0f; } } /** * Update particle * note: Write to global memory only once at the end of the cycle. 
*/ velx += tmpvelx; p_out.vel_x[thread_id] = velx; p_out.pos_x[thread_id] = velx * dt + posx; vely += tmpvely; p_out.vel_y[thread_id] = vely; p_out.pos_y[thread_id] = vely * dt + posy; velz += tmpvelz; p_out.vel_z[thread_id] = velz; p_out.pos_z[thread_id] = velz * dt + posz; } }// end of calculate_velocity //----------------------------------------------------------------------------------------------------------------------- /** * CUDA kernel to update particles * @param p - particles * @param comX - pointer to a center of mass position in X * @param comY - pointer to a center of mass position in Y * @param comZ - pointer to a center of mass position in Z * @param comW - pointer to a center of mass weight * @param lock - pointer to a user-implemented lock * @param N - Number of particles */ __global__ void centerOfMass(t_particles p, float* comX, float* comY, float* comZ, float* comW, int* lock, const int N) { }// end of centerOfMass //---------------------------------------------------------------------------------------------------------------------- /** * CPU implementation of the Center of Mass calculation * @param particles - All particles in the system * @param N - Number of particles */ __host__ float4 centerOfMassCPU(MemDesc& memDesc) { float4 com = {0 ,0, 0, 0}; for(int i = 0; i < memDesc.getDataSize(); i++) { // Calculate the vector on the line connecting points and most recent position of center-of-mass const float dx = memDesc.getPosX(i) - com.x; const float dy = memDesc.getPosY(i) - com.y; const float dz = memDesc.getPosZ(i) - com.z; // Calculate weight ratio only if at least one particle isn't massless const float dw = ((memDesc.getWeight(i) + com.w) > 0.0f) ? ( memDesc.getWeight(i) / (memDesc.getWeight(i) + com.w)) : 0.0f; // Update position and weight of the center-of-mass according to the weight ration and vector com.x += dx * dw; com.y += dy * dw; com.z += dz * dw; com.w += memDesc.getWeight(i); } return com; }// enf of centerOfMassCPU //----------------------------------------------------------------------------------------------------------------------
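// Hypothetical launch helper (an editorial sketch, not part of the original nbody.cu).
// The kernel above guards every thread with `thread_id < N`, so a ceiling-division grid over
// the N particles covers each particle exactly once; t_particles comes from nbody.h, and the
// helper name and block size below are assumptions.
static void launch_calculate_velocity(const t_particles& p_in, t_particles& p_out, int N, float dt)
{
  const int tpb     = 128;                      // threads per block (assumed)
  const int nblocks = (N + tpb - 1) / tpb;      // round up so nblocks * tpb >= N
  calculate_velocity<<<nblocks, tpb>>>(p_in, p_out, N, dt);
}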
7d2c85b03e7bb75b36ce66527ad029870df832b1.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <string> #include <sys/time.h> #include <vector> #include <cmath> #include <hip/hip_runtime.h> #include <thrust/system_error.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/fill.h> #include <thrust/inner_product.h> #pragma push #pragma diag_suppress = code_is_unreachable #include "newmat.h" #include "miscmaths/miscmaths.h" #ifndef EXPOSE_TREACHEROUS #define I_CUDAVOLUME_H_DEFINED_ET #define EXPOSE_TREACHEROUS #endif #include "newimage/newimageall.h" #pragma pop #include "EddyHelperClasses.h" #include "EddyKernels.h" #include "EddyFunctors.h" #include "CudaVolume.h" using namespace EDDY; using namespace EddyKernels; void CudaVolume::SetHdr(const CudaVolume4D& cv) EddyTry { _spv=false; _sz=cv._sz; _hdr=cv._hdr; try { _devec.resize(cv.Size()); _spcoef.clear(); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SetHdr with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume::Sample(const EDDY::CudaImageCoordinates& coord, CudaVolume& smpl) const EddyTry { if (Interp()!=NEWIMAGE::spline && Interp()!=NEWIMAGE::trilinear) throw EddyException("CudaVolume::Sample: Invalid interpolation option"); if (Extrap()!=NEWIMAGE::extraslice && Extrap()!=NEWIMAGE::periodic && Extrap()!=NEWIMAGE::mirror) throw EddyException("CudaVolume::Sample: Invalid extrapolation option"); if (smpl!=*this) throw EddyException("CudaVolume::Sample: Dimension mismatch"); if (Interp()==NEWIMAGE::spline && !_spv) { if (_spcoef.size() != _devec.size()) { try { _spcoef.resize(_devec.size()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sample_1 after call to resize with message: " << e.what() << std::endl; throw; } } calculate_spline_coefs(_sz,_devec,_spcoef); _spv = true; } int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; else if (Extrap()==NEWIMAGE::mirror) ep = EddyKernels::MIRROR; if (Interp()==NEWIMAGE::spline) { hipLaunchKernelGGL(( EddyKernels::spline_interpolate), dim3(nblocks),dim3(tpb), 0, 0, Size(0),Size(1),Size(2),sp_ptr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr()); EddyKernels::CudaSync("EddyKernels::spline_interpolate"); } else { hipLaunchKernelGGL(( EddyKernels::linear_interpolate), dim3(nblocks),dim3(tpb), 0, 0, Size(0),Size(1),Size(2),GetPtr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr()); EddyKernels::CudaSync("EddyKernels::linear_interpolate"); } } EddyCatch void CudaVolume::Sample(const EDDY::CudaImageCoordinates& coord, CudaVolume& smpl, CudaVolume4D& dsmpl) const EddyTry { if (Interp()!=NEWIMAGE::spline && Interp()!=NEWIMAGE::trilinear) throw EddyException("CudaVolume::Sample: Invalid interpolation option"); if (Extrap()!=NEWIMAGE::extraslice && Extrap()!=NEWIMAGE::periodic && Extrap()!=NEWIMAGE::mirror) throw EddyException("CudaVolume::Sample: Invalid extrapolation option"); if (smpl!=(*this) || dsmpl!=(*this)) throw EddyException("CudaVolume::Sample: Dimension mismatch"); if (dsmpl.Size(3)!=3) throw EddyException("CudaVolume::Sample: dsmpl.Size(3) must be 3"); if (Interp()==NEWIMAGE::spline && !_spv) { if (_spcoef.size() != _devec.size()) { try { _spcoef.resize(_devec.size()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sample_2 after call to resize with message: " << e.what() << std::endl; throw; } } calculate_spline_coefs(_sz,_devec,_spcoef); _spv = true; } int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; else if (Extrap()==NEWIMAGE::mirror) ep = EddyKernels::MIRROR; if (Interp()==NEWIMAGE::spline) { hipLaunchKernelGGL(( EddyKernels::spline_interpolate), dim3(nblocks),dim3(tpb), 0, 0, Size(0),Size(1),Size(2),sp_ptr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr(), dsmpl.GetPtr(0),dsmpl.GetPtr(1),dsmpl.GetPtr(2)); EddyKernels::CudaSync("EddyKernels::spline_interpolate"); } else { hipLaunchKernelGGL(( EddyKernels::linear_interpolate), dim3(nblocks),dim3(tpb), 0, 0, Size(0),Size(1),Size(2),GetPtr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr(), dsmpl.GetPtr(0),dsmpl.GetPtr(1),dsmpl.GetPtr(2)); EddyKernels::CudaSync("EddyKernels::linear_interpolate"); } } EddyCatch void CudaVolume::ValidMask(const EDDY::CudaImageCoordinates& coord, CudaVolume& mask) const EddyTry { int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; std::vector<bool> epval = ExtrapValid(); hipLaunchKernelGGL(( EddyKernels::valid_voxels), dim3(nblocks),dim3(tpb), 0, 0, Size(0),Size(1),Size(2),epval[0],epval[1],epval[2], coord.XPtr(),coord.YPtr(),coord.ZPtr(),nthreads,mask.GetPtr()); EddyKernels::CudaSync("EddyKernels::valid_voxels"); } EddyCatch bool CudaVolume::operator==(const CudaVolume4D& rhs) const EddyTry { return(this->_sz[0]==rhs.Size(0) && this->_sz[1]==rhs.Size(1) && this->_sz[2]==rhs.Size(2) && fabs(this->_hdr.xdim()-rhs.Vxs(0))<1e-6 && fabs(this->_hdr.ydim()-rhs.Vxs(1))<1e-6 && fabs(this->_hdr.zdim()-rhs.Vxs(2))<1e-6); } EddyCatch CudaVolume& CudaVolume::operator+=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator+=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator+=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::plus<float>()); if (_spv && cv._spv) { thrust::transform(_spcoef.begin(),_spcoef.end(),cv._spcoef.begin(),_spcoef.begin(),thrust::plus<float>()); } else _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator+= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator-=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator-=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator-=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::minus<float>()); if (_spv && cv._spv) { thrust::transform(_spcoef.begin(),_spcoef.end(),cv._spcoef.begin(),_spcoef.begin(),thrust::minus<float>()); } else _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator-= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator*=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator*=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator*=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::multiplies<float>()); _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator*= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator/=(float a) EddyTry { if (!a) throw EddyException("CudaVolume::operator/=: Division by zero"); try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::MulByScalar<float>(1.0/a)); if (_spv) thrust::transform(_spcoef.begin(),_spcoef.end(),_spcoef.begin(),EDDY::MulByScalar<float>(1.0/a)); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator/= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch void CudaVolume::Smooth(float fwhm, const CudaVolume& mask) EddyTry { CudaVolume smask=mask; *this *= mask; this->Smooth(fwhm); smask.Smooth(fwhm); this->DivideWithinMask(smask,mask); *this *= mask; } EddyCatch void CudaVolume::MultiplyAndAddToMe(const CudaVolume& pv, float a) EddyTry { if (pv!=*this) throw EddyException("CudaVolume::MultiplyAndAddToMe: Dimension mismatch"); try { thrust::transform(_devec.begin(),_devec.end(),pv._devec.begin(),_devec.begin(),EDDY::MulAndAdd<float>(a)); if (_spv) { if (pv._spv) 
thrust::transform(_spcoef.begin(),_spcoef.end(),pv._spcoef.begin(),_spcoef.begin(),EDDY::MulAndAdd<float>(a)); else { _spcoef.clear(); _spv=false; }; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::MultiplyAndAddToMe: with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume::SubtractMultiplyAndAddToMe(const CudaVolume& pv, const CudaVolume& nv, float a) EddyTry { if (pv!=*this || nv!=*this) throw EddyException("CudaVolume::SubtractMultiplyAndAddToMe: Dimension mismatch"); int tpb = threads_per_block_smaatm; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::subtract_multiply_and_add_to_me), dim3(nblocks),dim3(tpb), 0, 0, pv.GetPtr(),nv.GetPtr(),a,nthreads,GetPtr()); EddyKernels::CudaSync("EddyKernels::subtract_multiply_and_add_to_me"); if (_spv) { _spcoef.clear(); _spv=false; } return; } EddyCatch void CudaVolume::SubtractSquareAndAddToMe(const CudaVolume& pv, const CudaVolume& nv) EddyTry { if (pv!=*this || nv!=*this) throw EddyException("CudaVolume::SubtractSquareAndAddToMe: Dimension mismatch"); int tpb = threads_per_block_ssaatm; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::subtract_square_and_add_to_me), dim3(nblocks),dim3(tpb), 0, 0, pv.GetPtr(),nv.GetPtr(),nthreads,GetPtr()); EddyKernels::CudaSync("EddyKernels::subtract_square_and_add_to_me"); if (_spv) { _spcoef.clear(); _spv=false; } } EddyCatch void CudaVolume::DivideWithinMask(const CudaVolume& divisor, const CudaVolume& mask) EddyTry { if (divisor!=*this || mask!=*this) throw EddyException("CudaVolume::DivideWithinMask: Dimension mismatch"); cuda_volume_utils::divide_within_mask(divisor._devec,mask._devec,_devec); if (_spv) { _spcoef.clear(); _spv=false; } } EddyCatch CudaVolume& CudaVolume::Binarise(float tv) EddyTry { try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::Binarise<float>(tv)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Binarise_1: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::Binarise(float ll, float ul) EddyTry { try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::Binarise<float>(ll,ul)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Binarise_2: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::MakeNormRand(float mu, float sigma) EddyTry { try { thrust::counting_iterator<unsigned int> index_seq_begin(0); thrust::transform(index_seq_begin,index_seq_begin+_devec.size(),_devec.begin(),EDDY::MakeNormRand<float>(mu,sigma)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::MakeRandom: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch double CudaVolume::Sum(const CudaVolume& mask) const EddyTry { double sum = 0.0; if (mask.Size()) { if (mask != *this) throw EddyException("CudaVolume::Sum: Mismatched volumes"); try { sum = thrust::inner_product(_devec.begin(),_devec.end(),mask._devec.begin(),sum, thrust::plus<double>(),EDDY::Product<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sum in call 
inner_product: with message: " << e.what() << std::endl; throw; } } else { try { sum = thrust::reduce(_devec.begin(),_devec.end(),sum,EDDY::Sum<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sum in call reduce: with message: " << e.what() << std::endl; throw; } } return(sum); } EddyCatch double CudaVolume::SumOfSquares(const CudaVolume& mask) const EddyTry { double sos = 0.0; if (mask.Size()) { if (mask != *this) throw EddyException("CudaVolume::SumOfSquares: Mismatched volumes"); try { sos = thrust::inner_product(_devec.begin(),_devec.end(),mask._devec.begin(),sos, thrust::plus<double>(),EDDY::MaskedSquare<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SumOfSquares in call inner_product: with message: " << e.what() << std::endl; throw; } } else { try { sos = thrust::reduce(_devec.begin(),_devec.end(),sos,EDDY::SumSquare<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SumOfSquares in call reduce: with message: " << e.what() << std::endl; throw; } } return(sos); } EddyCatch CudaVolume& CudaVolume::operator=(float val) EddyTry { try { thrust::fill(_devec.begin(), _devec.end(), val); if (_spcoef.size()) { thrust::fill(_spcoef.begin(), _spcoef.end(), val); _spv=true; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch unsigned int CudaVolume::Size(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume::Size: Index out of range"); return(_sz[indx]); } EddyCatch float CudaVolume::Vxs(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume::Vxs: Index out of range"); float vxs = (!indx) ? _hdr.xdim() : ((indx==1) ? 
_hdr.ydim() : _hdr.zdim()); return(vxs); } EddyCatch NEWMAT::Matrix CudaVolume::Ima2WorldMatrix() const EddyTry { return(_hdr.sampling_mat()); } EddyCatch NEWMAT::Matrix CudaVolume::World2ImaMatrix() const EddyTry { return(_hdr.sampling_mat().i()); } EddyCatch void CudaVolume::GetVolume(NEWIMAGE::volume<float>& ovol) const EddyTry { thrust::host_vector<float> on_host; try { on_host = _devec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::GetVolume with message: " << e.what() << std::endl; throw; } ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int indx=0; for (int k=0; k<ovol.zsize(); k++) { for (int j=0; j<ovol.ysize(); j++) { for (int i=0; i<ovol.xsize(); i++) { ovol(i,j,k) = on_host[indx++]; } } } return; } EddyCatch void CudaVolume::GetSplineCoefs(NEWIMAGE::volume<float>& ovol) const EddyTry { if (!_spv) throw EddyException("CudaVolume::GetSplineCoefs: Attempt to obtain invalid spline coefficients"); thrust::host_vector<float> on_host; try { on_host = _spcoef; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::GetSplineCoefs with message: " << e.what() << std::endl; throw; } ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int indx=0; for (int k=0; k<ovol.zsize(); k++) { for (int j=0; j<ovol.ysize(); j++) { for (int i=0; i<ovol.xsize(); i++) { ovol(i,j,k) = on_host[indx++]; } } } return; } EddyCatch void CudaVolume::common_assignment_from_newimage_vol(const NEWIMAGE::volume<float>& vol, bool ifvol) EddyTry { if (ifvol) { thrust::host_vector<float> hvec(vol.xsize()*vol.ysize()*vol.zsize()); unsigned int i=0; for (NEWIMAGE::volume<float>::fast_const_iterator it=vol.fbegin(); it!=vol.fend(); it++, i++) { hvec[i] = *it; } try { _devec = hvec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after transfer with message: " << e.what() << std::endl; throw; } } else { try { _devec.resize(vol.xsize()*vol.ysize()*vol.zsize()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after resize() with message: " << e.what() << std::endl; throw; } } _sz[0] = vol.xsize(); _sz[1] = vol.ysize(); _sz[2] = vol.zsize(); try { _spcoef.clear(); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after clear() with message: " << e.what() << std::endl; throw; } _spv = false; _hdr.reinitialize(1,1,1); NEWIMAGE::copybasicproperties(vol,_hdr); } EddyCatch void CudaVolume::calculate_spline_coefs(const std::vector<unsigned int>& sz, const thrust::device_vector<float>& ima, thrust::device_vector<float>& coef) const EddyTry { if (ima.size() != coef.size()) throw EddyException("CudaVolume::calculate_spline_coefs: Mismatched ima and coef"); try { thrust::copy(ima.begin(),ima.end(),coef.begin()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::calculate_spline_coefs after copy() with message: " << e.what() << std::endl; throw; } float *cptr = NULL; try { cptr = thrust::raw_pointer_cast(coef.data()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::calculate_spline_coefs after raw_pointer_cast() with message: " << e.what() << std::endl; throw; } float z = -0.267949192431123f; unsigned int nburn = ((log(1e-8)/log(abs(z))) + 1.5); 
std::vector<unsigned int> initn(3); for (unsigned int i=0; i<3; i++) initn[i] = (nburn > sz[i]) ? sz[i] : nburn; int tpb = threads_per_block_deconv; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; for (unsigned int dir=0; dir<3; dir++) { int nthreads = 1; for (int i=0; i<3; i++) if (i!=dir) nthreads *= sz[i]; int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::cubic_spline_deconvolution), dim3(nblocks),dim3(tpb), 0, 0, cptr,sz[0],sz[1],sz[2],dir, initn[dir],ep,nthreads); EddyKernels::CudaSync("EddyKernels::cubic_spline_deconvolution"); } return; } EddyCatch CudaVolume3D_2_4D_Helper CudaVolume4D::operator[](unsigned int indx) EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::operator[]: indx out of range"); CudaVolume3D_2_4D_Helper hlp(*this,indx); return(hlp); } EddyCatch void CudaVolume4D::SetVolume(unsigned int indx, const CudaVolume& vol) EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::SetVolume: indx out of range"); for (unsigned int i=0; i<3; i++) if (_sz[i] != vol._sz[i]) throw EddyException("CudaVolume4D::SetVolume: Mismatched volumes"); if (!NEWIMAGE::samedim(_hdr,vol._hdr,3)) throw EddyException("CudaVolume4D::SetVolume: Mismatched volumes"); _devecs[indx] = vol._devec; } EddyCatch CudaVolume4D& CudaVolume4D::operator+=(const CudaVolume4D& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume4D::operator+=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume4D::operator+=: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::transform(_devecs[i].begin(),_devecs[i].end(),cv._devecs[i].begin(),_devecs[i].begin(),thrust::plus<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::operator+= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch CudaVolume4D& CudaVolume4D::operator*=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume4D::operator*=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume4D::operator*=: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::transform(_devecs[i].begin(),_devecs[i].end(),cv._devec.begin(),_devecs[i].begin(),thrust::multiplies<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::operator*= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch void CudaVolume4D::DivideWithinMask(const CudaVolume& divisor, const CudaVolume& mask) EddyTry { if (divisor!=*this || mask!=*this) throw EddyException("CudaVolume::DivideWithinMask: Dimension mismatch"); if (!this->Size()) throw EddyException("CudaVolume4D::DivideWithinMask: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) cuda_volume_utils::divide_within_mask(divisor._devec,mask._devec,_devecs[i]); } EddyCatch void CudaVolume4D::Smooth(float fwhm, const CudaVolume& mask) EddyTry { *this *= mask; for (unsigned int i=0; i<_devecs.size(); i++) { cuda_volume_utils::smooth(fwhm,_sz,_hdr,_devecs[i]); } CudaVolume smask=mask; smask.Smooth(fwhm); DivideWithinMask(smask,mask); *this *= mask; } EddyCatch CudaVolume4D& CudaVolume4D::operator=(float val) EddyTry { for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::fill(_devecs[i].begin(), _devecs[i].end(), val); } catch(thrust::system_error &e) { std::cerr << 
"thrust::system_error thrown in CudaVolume4D::operator= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch unsigned int CudaVolume4D::Size(unsigned int indx) const EddyTry { if (indx > 3) throw EddyException("CudaVolume4D::Size: Index out of range"); return(_sz[indx]); } EddyCatch float CudaVolume4D::Vxs(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume4D::Vxs: Index out of range"); float vxs = (!indx) ? _hdr.xdim() : ((indx==1) ? _hdr.ydim() : _hdr.zdim()); return(vxs); } EddyCatch void CudaVolume4D::GetVolume(NEWIMAGE::volume4D<float>& ovol) const EddyTry { ovol.reinitialize(_sz[0],_sz[1],_sz[2],_sz[3]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int volsize = _sz[0]*_sz[1]*_sz[2]; NEWIMAGE::volume<float>::nonsafe_fast_iterator it = ovol.nsfbegin(); for (unsigned int v=0; v<_devecs.size(); v++) { thrust::host_vector<float> on_host; try { on_host = _devecs[v]; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::GetVolume_1: with message: " << e.what() << std::endl; throw; } for (unsigned int i=0; i<volsize; i++, it++) *it = on_host[i]; } return; } EddyCatch void CudaVolume4D::GetVolume(unsigned int indx, NEWIMAGE::volume<float>& ovol) const EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::GetVolume(indx,ovol): indx out of range"); ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); thrust::host_vector<float> on_host; try { on_host = _devecs[indx]; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::GetVolume_2: with message: " << e.what() << std::endl; throw; } unsigned int volsize = _sz[0]*_sz[1]*_sz[2]; NEWIMAGE::volume<float>::nonsafe_fast_iterator it = ovol.nsfbegin(); for (unsigned int i=0; i<volsize; i++, it++) *it = on_host[i]; return; } EddyCatch void CudaVolume4D::common_assignment_from_newimage_vol(const NEWIMAGE::volume<float>& vol, bool ifvol) EddyTry { _devecs.resize(vol.tsize()); unsigned int volsize = static_cast<unsigned int>(vol.xsize()*vol.ysize()*vol.zsize()); if (ifvol) { thrust::host_vector<float> hvec(volsize); for (unsigned int i=0; i<_devecs.size(); i++) { NEWIMAGE::volume<float>::fast_const_iterator it=vol.fbegin(i); for (unsigned int j=0; j<volsize; j++, it++) { hvec[j] = *it; } try { _devecs[i] = hvec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::common_assignment_from_newimage_vol after transfer with index: " << i << ", with message: " << e.what() << std::endl; throw; } } } else { int i; for (i=0; i<vol.tsize(); i++) { try { _devecs[i].resize(volsize); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::common_assignment_from_newimage_vol after resize() with index: " << i << ", with message: " << e.what() << std::endl; throw; } } } _sz[0] = vol.xsize(); _sz[1] = vol.ysize(); _sz[2] = vol.zsize(); _sz[3] = vol.tsize(); _hdr.reinitialize(1,1,1); NEWIMAGE::copybasicproperties(vol,_hdr); } EddyCatch void cuda_volume_utils::smooth(float fwhm, const std::vector<unsigned int>& sz, const NEWIMAGE::volume<float>& hdr, thrust::device_vector<float>& ima) EddyTry { thrust::device_vector<float> xk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.xdim()); thrust::device_vector<float> yk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.ydim()); thrust::device_vector<float> zk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.zdim()); thrust::device_vector<float> 
sv(sz[0]*sz[1]*sz[2]); int tpb = threads_per_block_convolve_1D; int nthreads = sz[0]*sz[1]*sz[2]; int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::convolve_1D), dim3(nblocks),dim3(tpb), 0, 0, sz[0],sz[1],sz[2],thrust::raw_pointer_cast(ima.data()),thrust::raw_pointer_cast(xk.data()), xk.size(),0,nthreads,thrust::raw_pointer_cast(sv.data())); hipLaunchKernelGGL(( EddyKernels::convolve_1D), dim3(nblocks),dim3(tpb), 0, 0, sz[0],sz[1],sz[2],thrust::raw_pointer_cast(sv.data()),thrust::raw_pointer_cast(yk.data()), yk.size(),1,nthreads,thrust::raw_pointer_cast(ima.data())); hipLaunchKernelGGL(( EddyKernels::convolve_1D), dim3(nblocks),dim3(tpb), 0, 0, sz[0],sz[1],sz[2],thrust::raw_pointer_cast(ima.data()),thrust::raw_pointer_cast(zk.data()), zk.size(),2,nthreads,thrust::raw_pointer_cast(sv.data())); ima = sv; } EddyCatch thrust::host_vector<float> cuda_volume_utils::gaussian_1D_kernel(float fwhm) EddyTry { float s = fwhm/std::sqrt(8.0*::log(2.0)); unsigned int sz = 6*s + 0.5; sz = 2*sz+1; thrust::host_vector<float> rval(sz); double sum=0.0; for (unsigned int i=0; i<sz; i++) { rval[i] = exp(-sqr(int(i)-int(sz)/2)/(2.0*sqr(s))); sum += rval[i]; } for (unsigned int i=0; i<sz; i++) rval[i] /= sum; return(rval); } EddyCatch void cuda_volume_utils::divide_within_mask(const thrust::device_vector<float>& divisor, const thrust::device_vector<float>& mask, thrust::device_vector<float>& ima) EddyTry { try { thrust::transform_if(ima.begin(),ima.end(),divisor.begin(),mask.begin(),ima.begin(), thrust::divides<float>(),thrust::identity<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in cuda_volume_utils::divide_within_mask: with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume3D_2_4D_Helper::operator=(const CudaVolume& threed) EddyTry { for (unsigned int i=0; i<3; i++) if (_fourd._sz[i] != threed._sz[i]) throw EddyException("CudaVolume4D::operator=(CudaVolume): Mismatched 3D volume"); if (!NEWIMAGE::samedim(_fourd._hdr,threed._hdr,3)) throw EddyException("CudaVolume4D::operator=(CudaVolume): Mismatched 3D volume"); _fourd._devecs[_indx] = threed._devec; } EddyCatch void CudaImageCoordinates::Transform(const NEWMAT::Matrix& A) EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::affine_transform_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,A(1,1),A(1,2),A(1,3),A(1,4),A(2,1), A(2,2),A(2,3),A(2,4),A(3,1),A(3,2),A(3,3),A(3,4), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::affine_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const std::vector<NEWMAT::Matrix>& A) EddyTry { if (A.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices A"); thrust::device_vector<float> dA = this->repack_vector_of_matrices(A); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::slice_wise_affine_transform_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,thrust::raw_pointer_cast(dA.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_wise_affine_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const NEWMAT::Matrix& A, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& B) EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::general_transform_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),A(1,1),A(1,2),A(1,3),A(1,4), A(2,1),A(2,2),A(2,3),A(2,4),A(3,1),A(3,2), A(3,3),A(3,4),B(1,1),B(1,2),B(1,3),B(1,4), B(2,1),B(2,2),B(2,3),B(2,4),B(3,1),B(3,2),B(3,3), B(3,4),XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::general_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const std::vector<NEWMAT::Matrix>& A, const EDDY::CudaVolume4D& dfield, const std::vector<NEWMAT::Matrix>& B) EddyTry { if (A.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices A"); if (B.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices B"); thrust::device_vector<float> dA = this->repack_vector_of_matrices(A); thrust::device_vector<float> dB = this->repack_vector_of_matrices(B); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::slice_wise_general_transform_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dA.data()), thrust::raw_pointer_cast(dB.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_wise_general_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::GetSliceToVolXYZCoord(const NEWMAT::Matrix& M1, const std::vector<NEWMAT::Matrix>& R, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& M2, EDDY::CudaVolume& zcoord) EddyTry { if (R.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Mismatched vector of matrices R"); if (M1(1,2) != 0.0 || M1(1,3) != 0.0 || M1(2,1) != 0.0 || M1(2,3) != 0.0 || M1(3,1) != 0.0 || M1(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Invalid M1 matrix"); } if (M2(1,2) != 0.0 || M2(1,3) != 0.0 || M2(2,1) != 0.0 || M2(2,3) != 0.0 || M2(3,1) != 0.0 || M2(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Invalid M2 matrix"); } thrust::device_vector<float> dM1 = this->repack_matrix(M1); thrust::device_vector<float> dR = this->repack_vector_of_matrices(R); thrust::device_vector<float> dM2 = this->repack_matrix(M2); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::slice_to_vol_xyz_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dM1.data()), thrust::raw_pointer_cast(dR.data()),thrust::raw_pointer_cast(dM2.data()), XPtr(),YPtr(),ZPtr(),zcoord.GetPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_to_vol_xyz_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::GetSliceToVolZCoord(const NEWMAT::Matrix& M1, const std::vector<NEWMAT::Matrix>& R, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& M2) EddyTry { if (R.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Mismatched vector of matrices R"); if (M1(1,2) != 0.0 || M1(1,3) != 0.0 || M1(2,1) != 0.0 || M1(2,3) != 0.0 || M1(3,1) != 0.0 || M1(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Invalid M1 matrix"); } if (M2(1,2) != 0.0 || M2(1,3) != 0.0 || M2(2,1) != 0.0 || M2(2,3) != 0.0 || M2(3,1) != 0.0 || M2(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Invalid M2 matrix"); } thrust::device_vector<float> dM1 = this->repack_matrix(M1); thrust::device_vector<float> dR = this->repack_vector_of_matrices(R); thrust::device_vector<float> dM2 = this->repack_matrix(M2); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::slice_to_vol_z_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dM1.data()), thrust::raw_pointer_cast(dR.data()),thrust::raw_pointer_cast(dM2.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_to_vol_z_coordinates"); _init=true; return; } EddyCatch CudaImageCoordinates& CudaImageCoordinates::operator-=(const CudaImageCoordinates& rhs) EddyTry { if (this->Size() != rhs.Size()) throw EddyException("CudaImageCoordinates::operator-=: Size mismatch."); if (!_init) init_coord(); if (!rhs._init) { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::implicit_coord_sub), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,XPtr(),YPtr(),ZPtr(),nthreads); } else { try { thrust::transform(_x.begin(),_x.end(),rhs._x.begin(),_x.begin(),thrust::minus<float>()); thrust::transform(_y.begin(),_y.end(),rhs._y.begin(),_y.begin(),thrust::minus<float>()); thrust::transform(_z.begin(),_z.end(),rhs._z.begin(),_z.begin(),thrust::minus<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaImageCoordinates::::operator-= with message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch NEWMAT::Matrix CudaImageCoordinates::AsMatrix() const EddyTry { NEWMAT::Matrix rval(Size(),3); thrust::host_vector<float> x = _x; thrust::host_vector<float> y = _y; thrust::host_vector<float> z = _z; for (unsigned int i=0; i<Size(); i++) { rval(i+1,1) = x[i]; rval(i+1,2) = y[i]; rval(i+1,3) = z[i]; } return(rval); } EddyCatch void CudaImageCoordinates::Write(const std::string& fname, unsigned int n) const EddyTry { NEWMAT::Matrix coord = AsMatrix(); if (n && n<Size()) MISCMATHS::write_ascii_matrix(fname,coord.Rows(1,n)); else MISCMATHS::write_ascii_matrix(fname,coord); } EddyCatch void CudaImageCoordinates::init_coord() EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; hipLaunchKernelGGL(( EddyKernels::make_coordinates), dim3(nblocks),dim3(tpb), 0, 0, _xn,_yn,_zn,XPtr(),YPtr(),ZPtr(),nthreads); EddyKernels::CudaSync("EddyKernels::make_coordinates"); _init = true; return; } EddyCatch thrust::device_vector<float> CudaImageCoordinates::repack_matrix(const NEWMAT::Matrix& A) EddyTry { thrust::host_vector<float> hA(12); hA[0] = A(1,1); hA[1] = A(1,2); hA[2] = A(1,3); hA[3] = A(1,4); hA[4] = A(2,1); hA[5] = A(2,2); hA[6] = A(2,3); hA[7] = A(2,4); hA[8] = A(3,1); hA[9] = A(3,2); hA[10] = A(3,3); hA[11] = A(3,4); return(hA); } EddyCatch thrust::device_vector<float> CudaImageCoordinates::repack_vector_of_matrices(const std::vector<NEWMAT::Matrix>& A) EddyTry { thrust::host_vector<float> hA(12*A.size()); for (unsigned int i=0; i<A.size(); i++) { unsigned int offs = 12*i; hA[offs] = A[i](1,1); hA[offs+1] = A[i](1,2); hA[offs+2] = A[i](1,3); hA[offs+3] = A[i](1,4); hA[offs+4] = A[i](2,1); hA[offs+5] = A[i](2,2); hA[offs+6] = A[i](2,3); hA[offs+7] = A[i](2,4); hA[offs+8] = A[i](3,1); hA[offs+9] = A[i](3,2); hA[offs+10] = A[i](3,3); hA[offs+11] = A[i](3,4); } return(hA); } EddyCatch
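// Editorial note on cuda_volume_utils::gaussian_1D_kernel above (a remark, not from the
// original file): s = fwhm / sqrt(8*log(2)) is the standard FWHM-to-sigma conversion, since a
// Gaussian has FWHM = 2*sqrt(2*ln 2) * sigma, roughly 2.3548 * sigma; the half-width of 6*s
// then makes the truncated kernel span about +/- 6 standard deviations. The repeated grid-size
// expression `(nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb` used throughout the file
// is the same block count as the one-expression ceiling division:
//   int nblocks = (nthreads + tpb - 1) / tpb;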
7d2c85b03e7bb75b36ce66527ad029870df832b1.cu
#include <cstdlib> #include <string> #include <sys/time.h> #include <vector> #include <cmath> #include <cuda.h> #include <thrust/system_error.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/device_ptr.h> #include <thrust/transform.h> #include <thrust/fill.h> #include <thrust/inner_product.h> #pragma push #pragma diag_suppress = code_is_unreachable #include "newmat.h" #include "miscmaths/miscmaths.h" #ifndef EXPOSE_TREACHEROUS #define I_CUDAVOLUME_H_DEFINED_ET #define EXPOSE_TREACHEROUS #endif #include "newimage/newimageall.h" #pragma pop #include "EddyHelperClasses.h" #include "EddyKernels.h" #include "EddyFunctors.h" #include "CudaVolume.h" using namespace EDDY; using namespace EddyKernels; void CudaVolume::SetHdr(const CudaVolume4D& cv) EddyTry { _spv=false; _sz=cv._sz; _hdr=cv._hdr; try { _devec.resize(cv.Size()); _spcoef.clear(); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SetHdr with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume::Sample(const EDDY::CudaImageCoordinates& coord, CudaVolume& smpl) const EddyTry { if (Interp()!=NEWIMAGE::spline && Interp()!=NEWIMAGE::trilinear) throw EddyException("CudaVolume::Sample: Invalid interpolation option"); if (Extrap()!=NEWIMAGE::extraslice && Extrap()!=NEWIMAGE::periodic && Extrap()!=NEWIMAGE::mirror) throw EddyException("CudaVolume::Sample: Invalid extrapolation option"); if (smpl!=*this) throw EddyException("CudaVolume::Sample: Dimension mismatch"); if (Interp()==NEWIMAGE::spline && !_spv) { if (_spcoef.size() != _devec.size()) { try { _spcoef.resize(_devec.size()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sample_1 after call to resize with message: " << e.what() << std::endl; throw; } } calculate_spline_coefs(_sz,_devec,_spcoef); _spv = true; } int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; else if (Extrap()==NEWIMAGE::mirror) ep = EddyKernels::MIRROR; if (Interp()==NEWIMAGE::spline) { EddyKernels::spline_interpolate<<<nblocks,tpb>>>(Size(0),Size(1),Size(2),sp_ptr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr()); EddyKernels::CudaSync("EddyKernels::spline_interpolate"); } else { EddyKernels::linear_interpolate<<<nblocks,tpb>>>(Size(0),Size(1),Size(2),GetPtr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr()); EddyKernels::CudaSync("EddyKernels::linear_interpolate"); } } EddyCatch void CudaVolume::Sample(const EDDY::CudaImageCoordinates& coord, CudaVolume& smpl, CudaVolume4D& dsmpl) const EddyTry { if (Interp()!=NEWIMAGE::spline && Interp()!=NEWIMAGE::trilinear) throw EddyException("CudaVolume::Sample: Invalid interpolation option"); if (Extrap()!=NEWIMAGE::extraslice && Extrap()!=NEWIMAGE::periodic && Extrap()!=NEWIMAGE::mirror) throw EddyException("CudaVolume::Sample: Invalid extrapolation option"); if (smpl!=(*this) || dsmpl!=(*this)) throw EddyException("CudaVolume::Sample: Dimension mismatch"); if (dsmpl.Size(3)!=3) throw EddyException("CudaVolume::Sample: dsmpl.Size(3) must be 3"); if (Interp()==NEWIMAGE::spline && !_spv) { if (_spcoef.size() != _devec.size()) { try { _spcoef.resize(_devec.size()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sample_2 after call to resize with message: " << e.what() << std::endl; throw; } } calculate_spline_coefs(_sz,_devec,_spcoef); _spv = true; } int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; else if (Extrap()==NEWIMAGE::mirror) ep = EddyKernels::MIRROR; if (Interp()==NEWIMAGE::spline) { EddyKernels::spline_interpolate<<<nblocks,tpb>>>(Size(0),Size(1),Size(2),sp_ptr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr(), dsmpl.GetPtr(0),dsmpl.GetPtr(1),dsmpl.GetPtr(2)); EddyKernels::CudaSync("EddyKernels::spline_interpolate"); } else { EddyKernels::linear_interpolate<<<nblocks,tpb>>>(Size(0),Size(1),Size(2),GetPtr(),coord.XPtr(), coord.YPtr(),coord.ZPtr(),nthreads,ep,smpl.GetPtr(), dsmpl.GetPtr(0),dsmpl.GetPtr(1),dsmpl.GetPtr(2)); EddyKernels::CudaSync("EddyKernels::linear_interpolate"); } } EddyCatch void CudaVolume::ValidMask(const EDDY::CudaImageCoordinates& coord, CudaVolume& mask) const EddyTry { int tpb = threads_per_block_interpolate; int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; std::vector<bool> epval = ExtrapValid(); EddyKernels::valid_voxels<<<nblocks,tpb>>>(Size(0),Size(1),Size(2),epval[0],epval[1],epval[2], coord.XPtr(),coord.YPtr(),coord.ZPtr(),nthreads,mask.GetPtr()); EddyKernels::CudaSync("EddyKernels::valid_voxels"); } EddyCatch bool CudaVolume::operator==(const CudaVolume4D& rhs) const EddyTry { return(this->_sz[0]==rhs.Size(0) && this->_sz[1]==rhs.Size(1) && this->_sz[2]==rhs.Size(2) && fabs(this->_hdr.xdim()-rhs.Vxs(0))<1e-6 && fabs(this->_hdr.ydim()-rhs.Vxs(1))<1e-6 && fabs(this->_hdr.zdim()-rhs.Vxs(2))<1e-6); } EddyCatch CudaVolume& CudaVolume::operator+=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator+=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator+=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::plus<float>()); if (_spv && cv._spv) { thrust::transform(_spcoef.begin(),_spcoef.end(),cv._spcoef.begin(),_spcoef.begin(),thrust::plus<float>()); } else _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator+= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator-=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator-=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator-=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::minus<float>()); if (_spv && cv._spv) { thrust::transform(_spcoef.begin(),_spcoef.end(),cv._spcoef.begin(),_spcoef.begin(),thrust::minus<float>()); } else _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator-= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator*=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume::operator*=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume::operator*=: Empty volume"); try { thrust::transform(_devec.begin(),_devec.end(),cv._devec.begin(),_devec.begin(),thrust::multiplies<float>()); _spv=false; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator*= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::operator/=(float a) EddyTry { if (!a) throw EddyException("CudaVolume::operator/=: Division by zero"); try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::MulByScalar<float>(1.0/a)); if (_spv) thrust::transform(_spcoef.begin(),_spcoef.end(),_spcoef.begin(),EDDY::MulByScalar<float>(1.0/a)); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator/= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch void CudaVolume::Smooth(float fwhm, const CudaVolume& mask) EddyTry { CudaVolume smask=mask; *this *= mask; this->Smooth(fwhm); smask.Smooth(fwhm); this->DivideWithinMask(smask,mask); *this *= mask; } EddyCatch void CudaVolume::MultiplyAndAddToMe(const CudaVolume& pv, float a) EddyTry { if (pv!=*this) throw EddyException("CudaVolume::MultiplyAndAddToMe: Dimension mismatch"); try { thrust::transform(_devec.begin(),_devec.end(),pv._devec.begin(),_devec.begin(),EDDY::MulAndAdd<float>(a)); if (_spv) { if (pv._spv) 
thrust::transform(_spcoef.begin(),_spcoef.end(),pv._spcoef.begin(),_spcoef.begin(),EDDY::MulAndAdd<float>(a)); else { _spcoef.clear(); _spv=false; }; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::MultiplyAndAddToMe: with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume::SubtractMultiplyAndAddToMe(const CudaVolume& pv, const CudaVolume& nv, float a) EddyTry { if (pv!=*this || nv!=*this) throw EddyException("CudaVolume::SubtractMultiplyAndAddToMe: Dimension mismatch"); int tpb = threads_per_block_smaatm; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::subtract_multiply_and_add_to_me<<<nblocks,tpb>>>(pv.GetPtr(),nv.GetPtr(),a,nthreads,GetPtr()); EddyKernels::CudaSync("EddyKernels::subtract_multiply_and_add_to_me"); if (_spv) { _spcoef.clear(); _spv=false; } return; } EddyCatch void CudaVolume::SubtractSquareAndAddToMe(const CudaVolume& pv, const CudaVolume& nv) EddyTry { if (pv!=*this || nv!=*this) throw EddyException("CudaVolume::SubtractSquareAndAddToMe: Dimension mismatch"); int tpb = threads_per_block_ssaatm; int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::subtract_square_and_add_to_me<<<nblocks,tpb>>>(pv.GetPtr(),nv.GetPtr(),nthreads,GetPtr()); EddyKernels::CudaSync("EddyKernels::subtract_square_and_add_to_me"); if (_spv) { _spcoef.clear(); _spv=false; } } EddyCatch void CudaVolume::DivideWithinMask(const CudaVolume& divisor, const CudaVolume& mask) EddyTry { if (divisor!=*this || mask!=*this) throw EddyException("CudaVolume::DivideWithinMask: Dimension mismatch"); cuda_volume_utils::divide_within_mask(divisor._devec,mask._devec,_devec); if (_spv) { _spcoef.clear(); _spv=false; } } EddyCatch CudaVolume& CudaVolume::Binarise(float tv) EddyTry { try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::Binarise<float>(tv)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Binarise_1: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::Binarise(float ll, float ul) EddyTry { try { thrust::transform(_devec.begin(),_devec.end(),_devec.begin(),EDDY::Binarise<float>(ll,ul)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Binarise_2: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch CudaVolume& CudaVolume::MakeNormRand(float mu, float sigma) EddyTry { try { thrust::counting_iterator<unsigned int> index_seq_begin(0); thrust::transform(index_seq_begin,index_seq_begin+_devec.size(),_devec.begin(),EDDY::MakeNormRand<float>(mu,sigma)); if (_spv) { _spcoef.clear(); _spv=false; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::MakeRandom: with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch double CudaVolume::Sum(const CudaVolume& mask) const EddyTry { double sum = 0.0; if (mask.Size()) { if (mask != *this) throw EddyException("CudaVolume::Sum: Mismatched volumes"); try { sum = thrust::inner_product(_devec.begin(),_devec.end(),mask._devec.begin(),sum, thrust::plus<double>(),EDDY::Product<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sum in call inner_product: with message: " << e.what() << std::endl; throw; } } else { try { 
sum = thrust::reduce(_devec.begin(),_devec.end(),sum,EDDY::Sum<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::Sum in call reduce: with message: " << e.what() << std::endl; throw; } } return(sum); } EddyCatch double CudaVolume::SumOfSquares(const CudaVolume& mask) const EddyTry { double sos = 0.0; if (mask.Size()) { if (mask != *this) throw EddyException("CudaVolume::SumOfSquares: Mismatched volumes"); try { sos = thrust::inner_product(_devec.begin(),_devec.end(),mask._devec.begin(),sos, thrust::plus<double>(),EDDY::MaskedSquare<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SumOfSquares in call inner_product: with message: " << e.what() << std::endl; throw; } } else { try { sos = thrust::reduce(_devec.begin(),_devec.end(),sos,EDDY::SumSquare<float,double>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::SumOfSquares in call reduce: with message: " << e.what() << std::endl; throw; } } return(sos); } EddyCatch CudaVolume& CudaVolume::operator=(float val) EddyTry { try { thrust::fill(_devec.begin(), _devec.end(), val); if (_spcoef.size()) { thrust::fill(_spcoef.begin(), _spcoef.end(), val); _spv=true; } } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::operator= with message: " << e.what() << std::endl; throw; } return(*this); } EddyCatch unsigned int CudaVolume::Size(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume::Size: Index out of range"); return(_sz[indx]); } EddyCatch float CudaVolume::Vxs(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume::Vxs: Index out of range"); float vxs = (!indx) ? _hdr.xdim() : ((indx==1) ? 
_hdr.ydim() : _hdr.zdim()); return(vxs); } EddyCatch NEWMAT::Matrix CudaVolume::Ima2WorldMatrix() const EddyTry { return(_hdr.sampling_mat()); } EddyCatch NEWMAT::Matrix CudaVolume::World2ImaMatrix() const EddyTry { return(_hdr.sampling_mat().i()); } EddyCatch void CudaVolume::GetVolume(NEWIMAGE::volume<float>& ovol) const EddyTry { thrust::host_vector<float> on_host; try { on_host = _devec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::GetVolume with message: " << e.what() << std::endl; throw; } ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int indx=0; for (int k=0; k<ovol.zsize(); k++) { for (int j=0; j<ovol.ysize(); j++) { for (int i=0; i<ovol.xsize(); i++) { ovol(i,j,k) = on_host[indx++]; } } } return; } EddyCatch void CudaVolume::GetSplineCoefs(NEWIMAGE::volume<float>& ovol) const EddyTry { if (!_spv) throw EddyException("CudaVolume::GetSplineCoefs: Attempt to obtain invalid spline coefficients"); thrust::host_vector<float> on_host; try { on_host = _spcoef; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::GetSplineCoefs with message: " << e.what() << std::endl; throw; } ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int indx=0; for (int k=0; k<ovol.zsize(); k++) { for (int j=0; j<ovol.ysize(); j++) { for (int i=0; i<ovol.xsize(); i++) { ovol(i,j,k) = on_host[indx++]; } } } return; } EddyCatch void CudaVolume::common_assignment_from_newimage_vol(const NEWIMAGE::volume<float>& vol, bool ifvol) EddyTry { if (ifvol) { thrust::host_vector<float> hvec(vol.xsize()*vol.ysize()*vol.zsize()); unsigned int i=0; for (NEWIMAGE::volume<float>::fast_const_iterator it=vol.fbegin(); it!=vol.fend(); it++, i++) { hvec[i] = *it; } try { _devec = hvec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after transfer with message: " << e.what() << std::endl; throw; } } else { try { _devec.resize(vol.xsize()*vol.ysize()*vol.zsize()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after resize() with message: " << e.what() << std::endl; throw; } } _sz[0] = vol.xsize(); _sz[1] = vol.ysize(); _sz[2] = vol.zsize(); try { _spcoef.clear(); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::common_assignment_from_newimage_vol after clear() with message: " << e.what() << std::endl; throw; } _spv = false; _hdr.reinitialize(1,1,1); NEWIMAGE::copybasicproperties(vol,_hdr); } EddyCatch void CudaVolume::calculate_spline_coefs(const std::vector<unsigned int>& sz, const thrust::device_vector<float>& ima, thrust::device_vector<float>& coef) const EddyTry { if (ima.size() != coef.size()) throw EddyException("CudaVolume::calculate_spline_coefs: Mismatched ima and coef"); try { thrust::copy(ima.begin(),ima.end(),coef.begin()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::calculate_spline_coefs after copy() with message: " << e.what() << std::endl; throw; } float *cptr = NULL; try { cptr = thrust::raw_pointer_cast(coef.data()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume::calculate_spline_coefs after raw_pointer_cast() with message: " << e.what() << std::endl; throw; } float z = -0.267949192431123f; unsigned int nburn = ((log(1e-8)/log(abs(z))) + 1.5); 
std::vector<unsigned int> initn(3); for (unsigned int i=0; i<3; i++) initn[i] = (nburn > sz[i]) ? sz[i] : nburn; int tpb = threads_per_block_deconv; EddyKernels::ExtrapType ep = EddyKernels::PERIODIC; if (Extrap()==NEWIMAGE::extraslice) ep = EddyKernels::CONSTANT; for (unsigned int dir=0; dir<3; dir++) { int nthreads = 1; for (int i=0; i<3; i++) if (i!=dir) nthreads *= sz[i]; int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::cubic_spline_deconvolution<<<nblocks,tpb>>> (cptr,sz[0],sz[1],sz[2],dir, initn[dir],ep,nthreads); EddyKernels::CudaSync("EddyKernels::cubic_spline_deconvolution"); } return; } EddyCatch CudaVolume3D_2_4D_Helper CudaVolume4D::operator[](unsigned int indx) EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::operator[]: indx out of range"); CudaVolume3D_2_4D_Helper hlp(*this,indx); return(hlp); } EddyCatch void CudaVolume4D::SetVolume(unsigned int indx, const CudaVolume& vol) EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::SetVolume: indx out of range"); for (unsigned int i=0; i<3; i++) if (_sz[i] != vol._sz[i]) throw EddyException("CudaVolume4D::SetVolume: Mismatched volumes"); if (!NEWIMAGE::samedim(_hdr,vol._hdr,3)) throw EddyException("CudaVolume4D::SetVolume: Mismatched volumes"); _devecs[indx] = vol._devec; } EddyCatch CudaVolume4D& CudaVolume4D::operator+=(const CudaVolume4D& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume4D::operator+=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume4D::operator+=: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::transform(_devecs[i].begin(),_devecs[i].end(),cv._devecs[i].begin(),_devecs[i].begin(),thrust::plus<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::operator+= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch CudaVolume4D& CudaVolume4D::operator*=(const CudaVolume& cv) EddyTry { if (*this != cv) throw EddyException("CudaVolume4D::operator*=: Mismatched volumes"); if (!this->Size()) throw EddyException("CudaVolume4D::operator*=: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::transform(_devecs[i].begin(),_devecs[i].end(),cv._devec.begin(),_devecs[i].begin(),thrust::multiplies<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::operator*= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch void CudaVolume4D::DivideWithinMask(const CudaVolume& divisor, const CudaVolume& mask) EddyTry { if (divisor!=*this || mask!=*this) throw EddyException("CudaVolume::DivideWithinMask: Dimension mismatch"); if (!this->Size()) throw EddyException("CudaVolume4D::DivideWithinMask: Empty volume"); for (unsigned int i=0; i<_devecs.size(); i++) cuda_volume_utils::divide_within_mask(divisor._devec,mask._devec,_devecs[i]); } EddyCatch void CudaVolume4D::Smooth(float fwhm, const CudaVolume& mask) EddyTry { *this *= mask; for (unsigned int i=0; i<_devecs.size(); i++) { cuda_volume_utils::smooth(fwhm,_sz,_hdr,_devecs[i]); } CudaVolume smask=mask; smask.Smooth(fwhm); DivideWithinMask(smask,mask); *this *= mask; } EddyCatch CudaVolume4D& CudaVolume4D::operator=(float val) EddyTry { for (unsigned int i=0; i<_devecs.size(); i++) { try { thrust::fill(_devecs[i].begin(), _devecs[i].end(), val); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in 
CudaVolume4D::operator= with index: " << i << ", and message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch unsigned int CudaVolume4D::Size(unsigned int indx) const EddyTry { if (indx > 3) throw EddyException("CudaVolume4D::Size: Index out of range"); return(_sz[indx]); } EddyCatch float CudaVolume4D::Vxs(unsigned int indx) const EddyTry { if (indx > 2) throw EddyException("CudaVolume4D::Vxs: Index out of range"); float vxs = (!indx) ? _hdr.xdim() : ((indx==1) ? _hdr.ydim() : _hdr.zdim()); return(vxs); } EddyCatch void CudaVolume4D::GetVolume(NEWIMAGE::volume4D<float>& ovol) const EddyTry { ovol.reinitialize(_sz[0],_sz[1],_sz[2],_sz[3]); NEWIMAGE::copybasicproperties(_hdr,ovol); unsigned int volsize = _sz[0]*_sz[1]*_sz[2]; NEWIMAGE::volume<float>::nonsafe_fast_iterator it = ovol.nsfbegin(); for (unsigned int v=0; v<_devecs.size(); v++) { thrust::host_vector<float> on_host; try { on_host = _devecs[v]; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::GetVolume_1: with message: " << e.what() << std::endl; throw; } for (unsigned int i=0; i<volsize; i++, it++) *it = on_host[i]; } return; } EddyCatch void CudaVolume4D::GetVolume(unsigned int indx, NEWIMAGE::volume<float>& ovol) const EddyTry { if (indx >= _sz[3]) throw EddyException("CudaVolume4D::GetVolume(indx,ovol): indx out of range"); ovol.reinitialize(_sz[0],_sz[1],_sz[2]); NEWIMAGE::copybasicproperties(_hdr,ovol); thrust::host_vector<float> on_host; try { on_host = _devecs[indx]; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::GetVolume_2: with message: " << e.what() << std::endl; throw; } unsigned int volsize = _sz[0]*_sz[1]*_sz[2]; NEWIMAGE::volume<float>::nonsafe_fast_iterator it = ovol.nsfbegin(); for (unsigned int i=0; i<volsize; i++, it++) *it = on_host[i]; return; } EddyCatch void CudaVolume4D::common_assignment_from_newimage_vol(const NEWIMAGE::volume<float>& vol, bool ifvol) EddyTry { _devecs.resize(vol.tsize()); unsigned int volsize = static_cast<unsigned int>(vol.xsize()*vol.ysize()*vol.zsize()); if (ifvol) { thrust::host_vector<float> hvec(volsize); for (unsigned int i=0; i<_devecs.size(); i++) { NEWIMAGE::volume<float>::fast_const_iterator it=vol.fbegin(i); for (unsigned int j=0; j<volsize; j++, it++) { hvec[j] = *it; } try { _devecs[i] = hvec; } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::common_assignment_from_newimage_vol after transfer with index: " << i << ", with message: " << e.what() << std::endl; throw; } } } else { int i; for (i=0; i<vol.tsize(); i++) { try { _devecs[i].resize(volsize); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaVolume4D::common_assignment_from_newimage_vol after resize() with index: " << i << ", with message: " << e.what() << std::endl; throw; } } } _sz[0] = vol.xsize(); _sz[1] = vol.ysize(); _sz[2] = vol.zsize(); _sz[3] = vol.tsize(); _hdr.reinitialize(1,1,1); NEWIMAGE::copybasicproperties(vol,_hdr); } EddyCatch void cuda_volume_utils::smooth(float fwhm, const std::vector<unsigned int>& sz, const NEWIMAGE::volume<float>& hdr, thrust::device_vector<float>& ima) EddyTry { thrust::device_vector<float> xk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.xdim()); thrust::device_vector<float> yk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.ydim()); thrust::device_vector<float> zk = cuda_volume_utils::gaussian_1D_kernel(fwhm/hdr.zdim()); thrust::device_vector<float> sv(sz[0]*sz[1]*sz[2]); int tpb = 
threads_per_block_convolve_1D; int nthreads = sz[0]*sz[1]*sz[2]; int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::convolve_1D<<<nblocks,tpb>>>(sz[0],sz[1],sz[2],thrust::raw_pointer_cast(ima.data()),thrust::raw_pointer_cast(xk.data()), xk.size(),0,nthreads,thrust::raw_pointer_cast(sv.data())); EddyKernels::convolve_1D<<<nblocks,tpb>>>(sz[0],sz[1],sz[2],thrust::raw_pointer_cast(sv.data()),thrust::raw_pointer_cast(yk.data()), yk.size(),1,nthreads,thrust::raw_pointer_cast(ima.data())); EddyKernels::convolve_1D<<<nblocks,tpb>>>(sz[0],sz[1],sz[2],thrust::raw_pointer_cast(ima.data()),thrust::raw_pointer_cast(zk.data()), zk.size(),2,nthreads,thrust::raw_pointer_cast(sv.data())); ima = sv; } EddyCatch thrust::host_vector<float> cuda_volume_utils::gaussian_1D_kernel(float fwhm) EddyTry { float s = fwhm/std::sqrt(8.0*std::log(2.0)); unsigned int sz = 6*s + 0.5; sz = 2*sz+1; thrust::host_vector<float> rval(sz); double sum=0.0; for (unsigned int i=0; i<sz; i++) { rval[i] = exp(-sqr(int(i)-int(sz)/2)/(2.0*sqr(s))); sum += rval[i]; } for (unsigned int i=0; i<sz; i++) rval[i] /= sum; return(rval); } EddyCatch void cuda_volume_utils::divide_within_mask(const thrust::device_vector<float>& divisor, const thrust::device_vector<float>& mask, thrust::device_vector<float>& ima) EddyTry { try { thrust::transform_if(ima.begin(),ima.end(),divisor.begin(),mask.begin(),ima.begin(), thrust::divides<float>(),thrust::identity<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in cuda_volume_utils::divide_within_mask: with message: " << e.what() << std::endl; throw; } } EddyCatch void CudaVolume3D_2_4D_Helper::operator=(const CudaVolume& threed) EddyTry { for (unsigned int i=0; i<3; i++) if (_fourd._sz[i] != threed._sz[i]) throw EddyException("CudaVolume4D::operator=(CudaVolume): Mismatched 3D volume"); if (!NEWIMAGE::samedim(_fourd._hdr,threed._hdr,3)) throw EddyException("CudaVolume4D::operator=(CudaVolume): Mismatched 3D volume"); _fourd._devecs[_indx] = threed._devec; } EddyCatch void CudaImageCoordinates::Transform(const NEWMAT::Matrix& A) EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::affine_transform_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,A(1,1),A(1,2),A(1,3),A(1,4),A(2,1), A(2,2),A(2,3),A(2,4),A(3,1),A(3,2),A(3,3),A(3,4), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::affine_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const std::vector<NEWMAT::Matrix>& A) EddyTry { if (A.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices A"); thrust::device_vector<float> dA = this->repack_vector_of_matrices(A); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::slice_wise_affine_transform_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,thrust::raw_pointer_cast(dA.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_wise_affine_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const NEWMAT::Matrix& A, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& B) EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; EddyKernels::general_transform_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),A(1,1),A(1,2),A(1,3),A(1,4), A(2,1),A(2,2),A(2,3),A(2,4),A(3,1),A(3,2), A(3,3),A(3,4),B(1,1),B(1,2),B(1,3),B(1,4), B(2,1),B(2,2),B(2,3),B(2,4),B(3,1),B(3,2),B(3,3), B(3,4),XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::general_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::Transform(const std::vector<NEWMAT::Matrix>& A, const EDDY::CudaVolume4D& dfield, const std::vector<NEWMAT::Matrix>& B) EddyTry { if (A.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices A"); if (B.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::Transform: Mismatched vector of matrices B"); thrust::device_vector<float> dA = this->repack_vector_of_matrices(A); thrust::device_vector<float> dB = this->repack_vector_of_matrices(B); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::slice_wise_general_transform_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dA.data()), thrust::raw_pointer_cast(dB.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_wise_general_transform_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::GetSliceToVolXYZCoord(const NEWMAT::Matrix& M1, const std::vector<NEWMAT::Matrix>& R, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& M2, EDDY::CudaVolume& zcoord) EddyTry { if (R.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Mismatched vector of matrices R"); if (M1(1,2) != 0.0 || M1(1,3) != 0.0 || M1(2,1) != 0.0 || M1(2,3) != 0.0 || M1(3,1) != 0.0 || M1(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Invalid M1 matrix"); } if (M2(1,2) != 0.0 || M2(1,3) != 0.0 || M2(2,1) != 0.0 || M2(2,3) != 0.0 || M2(3,1) != 0.0 || M2(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolXYZCoord: Invalid M2 matrix"); } thrust::device_vector<float> dM1 = this->repack_matrix(M1); thrust::device_vector<float> dR = this->repack_vector_of_matrices(R); thrust::device_vector<float> dM2 = this->repack_matrix(M2); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; EddyKernels::slice_to_vol_xyz_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dM1.data()), thrust::raw_pointer_cast(dR.data()),thrust::raw_pointer_cast(dM2.data()), XPtr(),YPtr(),ZPtr(),zcoord.GetPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_to_vol_xyz_coordinates"); _init=true; return; } EddyCatch void CudaImageCoordinates::GetSliceToVolZCoord(const NEWMAT::Matrix& M1, const std::vector<NEWMAT::Matrix>& R, const EDDY::CudaVolume4D& dfield, const NEWMAT::Matrix& M2) EddyTry { if (R.size() != this->Size(2)) throw EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Mismatched vector of matrices R"); if (M1(1,2) != 0.0 || M1(1,3) != 0.0 || M1(2,1) != 0.0 || M1(2,3) != 0.0 || M1(3,1) != 0.0 || M1(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Invalid M1 matrix"); } if (M2(1,2) != 0.0 || M2(1,3) != 0.0 || M2(2,1) != 0.0 || M2(2,3) != 0.0 || M2(3,1) != 0.0 || M2(3,2) != 0.0) { EddyException("CudaImageCoordinates::GetSliceToVolZCoord: Invalid M2 matrix"); } thrust::device_vector<float> dM1 = this->repack_matrix(M1); thrust::device_vector<float> dR = this->repack_vector_of_matrices(R); thrust::device_vector<float> dM2 = this->repack_matrix(M2); int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::slice_to_vol_z_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,dfield.GetPtr(0),dfield.GetPtr(1), dfield.GetPtr(2),thrust::raw_pointer_cast(dM1.data()), thrust::raw_pointer_cast(dR.data()),thrust::raw_pointer_cast(dM2.data()), XPtr(),YPtr(),ZPtr(),_init,nthreads); EddyKernels::CudaSync("EddyKernels::slice_to_vol_z_coordinates"); _init=true; return; } EddyCatch CudaImageCoordinates& CudaImageCoordinates::operator-=(const CudaImageCoordinates& rhs) EddyTry { if (this->Size() != rhs.Size()) throw EddyException("CudaImageCoordinates::operator-=: Size mismatch."); if (!_init) init_coord(); if (!rhs._init) { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? nthreads / tpb + 1 : nthreads / tpb; EddyKernels::implicit_coord_sub<<<nblocks,tpb>>>(_xn,_yn,_zn,XPtr(),YPtr(),ZPtr(),nthreads); } else { try { thrust::transform(_x.begin(),_x.end(),rhs._x.begin(),_x.begin(),thrust::minus<float>()); thrust::transform(_y.begin(),_y.end(),rhs._y.begin(),_y.begin(),thrust::minus<float>()); thrust::transform(_z.begin(),_z.end(),rhs._z.begin(),_z.begin(),thrust::minus<float>()); } catch(thrust::system_error &e) { std::cerr << "thrust::system_error thrown in CudaImageCoordinates::::operator-= with message: " << e.what() << std::endl; throw; } } return(*this); } EddyCatch NEWMAT::Matrix CudaImageCoordinates::AsMatrix() const EddyTry { NEWMAT::Matrix rval(Size(),3); thrust::host_vector<float> x = _x; thrust::host_vector<float> y = _y; thrust::host_vector<float> z = _z; for (unsigned int i=0; i<Size(); i++) { rval(i+1,1) = x[i]; rval(i+1,2) = y[i]; rval(i+1,3) = z[i]; } return(rval); } EddyCatch void CudaImageCoordinates::Write(const std::string& fname, unsigned int n) const EddyTry { NEWMAT::Matrix coord = AsMatrix(); if (n && n<Size()) MISCMATHS::write_ascii_matrix(fname,coord.Rows(1,n)); else MISCMATHS::write_ascii_matrix(fname,coord); } EddyCatch void CudaImageCoordinates::init_coord() EddyTry { int tpb = threads_per_block; unsigned int nthreads = Size(); int nblocks = (nthreads % tpb) ? 
nthreads / tpb + 1 : nthreads / tpb; EddyKernels::make_coordinates<<<nblocks,tpb>>>(_xn,_yn,_zn,XPtr(),YPtr(),ZPtr(),nthreads); EddyKernels::CudaSync("EddyKernels::make_coordinates"); _init = true; return; } EddyCatch thrust::device_vector<float> CudaImageCoordinates::repack_matrix(const NEWMAT::Matrix& A) EddyTry { thrust::host_vector<float> hA(12); hA[0] = A(1,1); hA[1] = A(1,2); hA[2] = A(1,3); hA[3] = A(1,4); hA[4] = A(2,1); hA[5] = A(2,2); hA[6] = A(2,3); hA[7] = A(2,4); hA[8] = A(3,1); hA[9] = A(3,2); hA[10] = A(3,3); hA[11] = A(3,4); return(hA); } EddyCatch thrust::device_vector<float> CudaImageCoordinates::repack_vector_of_matrices(const std::vector<NEWMAT::Matrix>& A) EddyTry { thrust::host_vector<float> hA(12*A.size()); for (unsigned int i=0; i<A.size(); i++) { unsigned int offs = 12*i; hA[offs] = A[i](1,1); hA[offs+1] = A[i](1,2); hA[offs+2] = A[i](1,3); hA[offs+3] = A[i](1,4); hA[offs+4] = A[i](2,1); hA[offs+5] = A[i](2,2); hA[offs+6] = A[i](2,3); hA[offs+7] = A[i](2,4); hA[offs+8] = A[i](3,1); hA[offs+9] = A[i](3,2); hA[offs+10] = A[i](3,3); hA[offs+11] = A[i](3,4); } return(hA); } EddyCatch
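// Lead-in (assumption-labelled sketch): the CudaVolume arithmetic operators in the file above
// (operator+=, operator-=, operator*=) all follow one pattern: thrust::transform over two
// device_vectors with a standard functor, wrapped in a try/catch for thrust::system_error.
// The stand-alone sketch below shows that same call shape in isolation; the vector length and
// fill values are illustrative assumptions, not taken from the original code.
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/system_error.h>
#include <iostream>

int main()
{
  try {
    thrust::device_vector<float> a(8, 3.0f);   // eight elements, all 3.0
    thrust::device_vector<float> b(8, 1.0f);   // eight elements, all 1.0

    // In-place a = a - b, the same call shape used by CudaVolume::operator-=
    thrust::transform(a.begin(), a.end(), b.begin(), a.begin(), thrust::minus<float>());

    float a0 = a[0];   // copy one element back to the host for inspection
    std::cout << "a[0] after subtraction: " << a0 << std::endl;   // prints 2
  }
  catch (thrust::system_error& e) {
    // Same recovery strategy as the original operators: report and rethrow.
    std::cerr << "thrust::system_error: " << e.what() << std::endl;
    throw;
  }
  return 0;
}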
cd4060d1a5fc15c230254bb2aee09e36a1cd4eac.hip
// !!! This is a file automatically generated by hipify!!! //#include "lem.h" #include "erosion.h" #include <math.h> #include "device_constants.cuh" #include "config.h" #include "updates.h" #include "mfd_accum.h" #include <thrust/reduce.h> #include <iostream> #include "io.h" void fillthesinks(Data* data) { int ncell_y = data->mapInfo.height; int ncell_x = data->mapInfo.width; int self; int cellx, celly, dcell; int upslope, downslope,flatcount, sinkcounter; double lowest; double thiscellht, targetcellht; int xmove[9] = { 0,1,1,1,0,-1,-1,-1,0 }; int ymove[9] = { -1,-1,0,1,1,1,0,-1,0 }; int* dx; int* dy; dx = &xmove[0]; dy = &ymove[0]; data->dx = dx; data->dy = dy; sinkcounter = 0; for (int irow = 0; irow < ncell_y; irow++) //irow loop { for (int icol = 0; icol < ncell_x; icol++) //icol loop { self = irow * ncell_x + icol; if (data->mask[self] == 1) // in catchment { lowest = 2000; upslope = 0; downslope = 0; flatcount = 0; for (dcell = 0; dcell < 8; dcell++) { cellx = icol + data->dx[dcell]; celly = irow + data->dy[dcell]; targetcellht = data->dem[celly * ncell_x + cellx]; if (targetcellht != -9999) // dont look outside the grid { if ((data->dem[self]) < targetcellht) upslope++; if ((data->dem[self]) > targetcellht) downslope++; if ((data->dem[self]) == targetcellht) flatcount++; if (targetcellht < lowest) lowest = targetcellht; // find the lowest neighbour } } if (upslope > 7) { data->dem[self] = lowest; // +0.00000001; // fill the sink to level of lowest neighbour and create a slope sinkcounter++; } } } } data->dem[data->outletcellidx] = data->dem[data->outletcellidx] - 0.00012; printf("number of sinks filled %d \n", sinkcounter); write_double(data, data->dem, "filleddem.asc"); } void erosionGPU(Data* data, Data* device, int iter) { int ncell_x = data->mapInfo.width; int ncell_y = data->mapInfo.height; int full_size = ncell_x * ncell_y; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); if (hipSuccess != hipSetDevice(CUDA_DEVICE)){ printf("Unable to access CUDA card\n"); exit(0); } size_t freenow, total; fprintf(data->outlog, "MOD: Starting Model Process Routines \n"); //calc averageslope SlopePtr and transfer to device aveslope(data, device); calc_diff_erosion(data, device); thrust::device_ptr<double> difftot_d = thrust::device_pointer_cast(device->eroPtr); data->totE = thrust::reduce(difftot_d, difftot_d + full_size); fprintf(data->outlog, "total concentrated from thrust is %10.8lf \n", data->totE); printf("total concentrated from thrust is %f \n", data->totE); calc_conc_erosion(data, device); thrust::device_ptr<double> incitot_d = thrust::device_pointer_cast(device->inciPtr); hipSetDevice(0); data->totI = thrust::reduce(incitot_d, incitot_d + full_size, (double)0); fprintf(data->outlog, "total Incision from thrust is %10.8lf \n", data->totI); printf("total Incision from thrust is %10.8lf \n", data->totI); calc_gelifluction(data, device); thrust::device_ptr<double> gelitot_d = thrust::device_pointer_cast(device->geliPtr); hipSetDevice(0); data->totG = thrust::reduce(gelitot_d, gelitot_d + full_size, (double)0); fprintf(data->outlog, "total gelifluction from thrust is %10.8lf \n", data->totG); printf("total gelifluction from thrust is %10.8lf \n", data->totG); fflush(data->outlog); checkCudaErrors( hipMemcpy ( device->mask, data->mask, full_size * sizeof(int), hipMemcpyHostToDevice) ); sedmfdaccum(data, device); fprintf(data->outlog, "MOD: returned from sedmfdaccum :%s\n", hipGetErrorString(hipGetLastError())); checkCudaErrors( 
hipMemcpy ( data->geliPtr, device->geliPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->depoPtr, device->depoPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors(hipMemcpy(data->SlopePtr, device->SlopePtr, full_size * sizeof(double), hipMemcpyDeviceToHost)); fprintf(data->outlog, "MOD: ero/inc/dep/slope memcopy :%s\n", hipGetErrorString(hipGetLastError())); calc_dz(data,device); // now includes gelifluction erosion checkCudaErrors( hipMemcpy ( data->dz, device->dz, full_size * sizeof(double), hipMemcpyDeviceToHost) ); // Now add in weathering products and update cell calibre and cell moisture data calc_weathering(data, device); // now copy back all updated matrices checkCudaErrors( hipMemcpy ( data->finesPtr, device->finesPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->stonePtr, device->stonePtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->soilTPtr, device->soilTPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->soilMPtr, device->soilMPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->weatherC, device->weatherC, full_size * sizeof(double), hipMemcpyDeviceToHost) ); checkCudaErrors( hipMemcpy ( data->weatherP, device->weatherP, full_size * sizeof(double), hipMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: fines/stone/soilT/soilM/weatherC and P memcopy back :%s\n", hipGetErrorString(hipGetLastError())); // Now update the surface height update_newSurface(data, device, iter); // Now update the nutrients on surface and in soil profile update_nutrients(data, device); checkCudaErrors( hipMemcpy ( data->soilBPtr, device->soilBPtr, full_size * sizeof(double), hipMemcpyDeviceToHost)) ; checkCudaErrors( hipMemcpy ( data->nutPtr, device->nutPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: conc_soilB/nutB copyback :%s\n", hipGetErrorString(hipGetLastError())); fflush(data->outlog); // Now grow the vegetation update_vegetation(data,device); checkCudaErrors( hipMemcpy( data->TotBPtr, device->TotBPtr, full_size * sizeof(double), hipMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: mem copyback TotBn :%s\n", hipGetErrorString(hipGetLastError())); //checkCudaErrors(hipMemcpy(data->dem, device->dem, full_size * sizeof(double), hipMemcpyDeviceToHost)); //write_double(data, data->dem, "dem2.asc"); //fillthesinks(data); // use this until flooding is working //checkCudaErrors(hipMemcpy(device->dem, data->dem, full_size * sizeof(double), hipMemcpyHostToDevice)); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); #ifndef PRODUCTION_RUN printf("Time to complete model calculations %.6f s\n\n", time / 1000.0); #endif fprintf(data->outlog, "MOD: time to complete flow accumulation %.6f s\n", time / 1000.0); hipMemGetInfo(&freenow, &total); fprintf(data->outlog, "MOD: Memory on CUDA card free at end of erosion: %zd total: %zd\n\n",freenow/1024,total/1024); }
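// Lead-in (assumption-labelled sketch): erosionGPU above totals the per-cell erosion,
// incision and gelifluction arrays by wrapping a raw device pointer with
// thrust::device_pointer_cast and calling thrust::reduce on the resulting range.
// A minimal CUDA/Thrust sketch of that idiom follows; the array name, its length and
// its contents are illustrative assumptions only.
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <cuda_runtime.h>
#include <vector>
#include <cstdio>

int main()
{
  const int n = 1024;
  double* d_ero = nullptr;                       // stands in for device->eroPtr
  cudaMalloc(&d_ero, n * sizeof(double));

  // Fill the device array with 1.0 via a small host buffer, purely for the demonstration.
  std::vector<double> h(n, 1.0);
  cudaMemcpy(d_ero, h.data(), n * sizeof(double), cudaMemcpyHostToDevice);

  // Wrap the raw pointer and reduce on the device, as done for the erosion totals above.
  thrust::device_ptr<double> p = thrust::device_pointer_cast(d_ero);
  double total = thrust::reduce(p, p + n, (double)0);
  printf("total = %f\n", total);                 // expected: 1024.0

  cudaFree(d_ero);
  return 0;
}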
cd4060d1a5fc15c230254bb2aee09e36a1cd4eac.cu
//#include "lem.h" #include "erosion.h" #include <math.h> #include "device_constants.cuh" #include "config.h" #include "updates.h" #include "mfd_accum.h" #include <thrust/reduce.h> #include <iostream> #include "io.h" void fillthesinks(Data* data) { int ncell_y = data->mapInfo.height; int ncell_x = data->mapInfo.width; int self; int cellx, celly, dcell; int upslope, downslope,flatcount, sinkcounter; double lowest; double thiscellht, targetcellht; int xmove[9] = { 0,1,1,1,0,-1,-1,-1,0 }; int ymove[9] = { -1,-1,0,1,1,1,0,-1,0 }; int* dx; int* dy; dx = &xmove[0]; dy = &ymove[0]; data->dx = dx; data->dy = dy; sinkcounter = 0; for (int irow = 0; irow < ncell_y; irow++) //irow loop { for (int icol = 0; icol < ncell_x; icol++) //icol loop { self = irow * ncell_x + icol; if (data->mask[self] == 1) // in catchment { lowest = 2000; upslope = 0; downslope = 0; flatcount = 0; for (dcell = 0; dcell < 8; dcell++) { cellx = icol + data->dx[dcell]; celly = irow + data->dy[dcell]; targetcellht = data->dem[celly * ncell_x + cellx]; if (targetcellht != -9999) // dont look outside the grid { if ((data->dem[self]) < targetcellht) upslope++; if ((data->dem[self]) > targetcellht) downslope++; if ((data->dem[self]) == targetcellht) flatcount++; if (targetcellht < lowest) lowest = targetcellht; // find the lowest neighbour } } if (upslope > 7) { data->dem[self] = lowest; // +0.00000001; // fill the sink to level of lowest neighbour and create a slope sinkcounter++; } } } } data->dem[data->outletcellidx] = data->dem[data->outletcellidx] - 0.00012; printf("number of sinks filled %d \n", sinkcounter); write_double(data, data->dem, "filleddem.asc"); } void erosionGPU(Data* data, Data* device, int iter) { int ncell_x = data->mapInfo.width; int ncell_y = data->mapInfo.height; int full_size = ncell_x * ncell_y; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); if (cudaSuccess != cudaSetDevice(CUDA_DEVICE)){ printf("Unable to access CUDA card\n"); exit(0); } size_t freenow, total; fprintf(data->outlog, "MOD: Starting Model Process Routines \n"); //calc averageslope SlopePtr and transfer to device aveslope(data, device); calc_diff_erosion(data, device); thrust::device_ptr<double> difftot_d = thrust::device_pointer_cast(device->eroPtr); data->totE = thrust::reduce(difftot_d, difftot_d + full_size); fprintf(data->outlog, "total concentrated from thrust is %10.8lf \n", data->totE); printf("total concentrated from thrust is %f \n", data->totE); calc_conc_erosion(data, device); thrust::device_ptr<double> incitot_d = thrust::device_pointer_cast(device->inciPtr); cudaSetDevice(0); data->totI = thrust::reduce(incitot_d, incitot_d + full_size, (double)0); fprintf(data->outlog, "total Incision from thrust is %10.8lf \n", data->totI); printf("total Incision from thrust is %10.8lf \n", data->totI); calc_gelifluction(data, device); thrust::device_ptr<double> gelitot_d = thrust::device_pointer_cast(device->geliPtr); cudaSetDevice(0); data->totG = thrust::reduce(gelitot_d, gelitot_d + full_size, (double)0); fprintf(data->outlog, "total gelifluction from thrust is %10.8lf \n", data->totG); printf("total gelifluction from thrust is %10.8lf \n", data->totG); fflush(data->outlog); checkCudaErrors( cudaMemcpy ( device->mask, data->mask, full_size * sizeof(int), cudaMemcpyHostToDevice) ); sedmfdaccum(data, device); fprintf(data->outlog, "MOD: returned from sedmfdaccum :%s\n", cudaGetErrorString(cudaGetLastError())); checkCudaErrors( cudaMemcpy ( data->geliPtr, device->geliPtr, 
full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->depoPtr, device->depoPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors(cudaMemcpy(data->SlopePtr, device->SlopePtr, full_size * sizeof(double), cudaMemcpyDeviceToHost)); fprintf(data->outlog, "MOD: ero/inc/dep/slope memcopy :%s\n", cudaGetErrorString(cudaGetLastError())); calc_dz(data,device); // now includes gelifluction erosion checkCudaErrors( cudaMemcpy ( data->dz, device->dz, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); // Now add in weathering products and update cell calibre and cell moisture data calc_weathering(data, device); // now copy back all updated matrices checkCudaErrors( cudaMemcpy ( data->finesPtr, device->finesPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->stonePtr, device->stonePtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->soilTPtr, device->soilTPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->soilMPtr, device->soilMPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->weatherC, device->weatherC, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); checkCudaErrors( cudaMemcpy ( data->weatherP, device->weatherP, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: fines/stone/soilT/soilM/weatherC and P memcopy back :%s\n", cudaGetErrorString(cudaGetLastError())); // Now update the surface height update_newSurface(data, device, iter); // Now update the nutrients on surface and in soil profile update_nutrients(data, device); checkCudaErrors( cudaMemcpy ( data->soilBPtr, device->soilBPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost)) ; checkCudaErrors( cudaMemcpy ( data->nutPtr, device->nutPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: conc_soilB/nutB copyback :%s\n", cudaGetErrorString(cudaGetLastError())); fflush(data->outlog); // Now grow the vegetation update_vegetation(data,device); checkCudaErrors( cudaMemcpy( data->TotBPtr, device->TotBPtr, full_size * sizeof(double), cudaMemcpyDeviceToHost) ); fprintf(data->outlog, "MOD: mem copyback TotBn :%s\n", cudaGetErrorString(cudaGetLastError())); //checkCudaErrors(cudaMemcpy(data->dem, device->dem, full_size * sizeof(double), cudaMemcpyDeviceToHost)); //write_double(data, data->dem, "dem2.asc"); //fillthesinks(data); // use this until flooding is working //checkCudaErrors(cudaMemcpy(device->dem, data->dem, full_size * sizeof(double), cudaMemcpyHostToDevice)); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); #ifndef PRODUCTION_RUN printf("Time to complete model calculations %.6f s\n\n", time / 1000.0); #endif fprintf(data->outlog, "MOD: time to complete flow accumulation %.6f s\n", time / 1000.0); cudaMemGetInfo(&freenow, &total); fprintf(data->outlog, "MOD: Memory on CUDA card free at end of erosion: %zd total: %zd\n\n",freenow/1024,total/1024); }
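// Lead-in (assumption-labelled sketch): the CUDA version of erosionGPU above brackets the whole
// model step with cudaEvent_t record / synchronize / elapsed-time calls and then reports seconds.
// The self-contained sketch below isolates that timing pattern; the dummy kernel, grid size and
// workload are illustrative assumptions, not part of the original file.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void busy_kernel(float* x, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 1.0001f + 0.5f;   // arbitrary work to have something to time
}

int main()
{
  const int n = 1 << 20;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  float ms = 0.0f;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);                       // mark the start on the default stream
  busy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(stop, 0);                        // mark the end
  cudaEventSynchronize(stop);                      // wait until 'stop' has actually been reached
  cudaEventElapsedTime(&ms, start, stop);          // elapsed time in milliseconds

  printf("kernel took %.6f s\n", ms / 1000.0);     // same ms-to-seconds conversion as erosionGPU
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}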
2341a0e9105914e2d09927fb2ce678612c277ebf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Includes, system #define ulong4 uint4 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include "sys/time.h" // includes, kernels #include "common.hip" #include "mummergpu.h" #include "mummergpu_kernel.cu" #define BLOCKSIZE 256 /*#define CUDA_SAFE_CALL( call) do { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0)*/ //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* height, AuxiliaryNodeData** aux_data, int* num_nodes, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, char*** queryNames, int** queryLengths, unsigned int* numQueries, size_t device_memory_avail, int min_match_length, bool rc); void printAlignments(char* ref, ReferencePage* page, char* query, int qrylen, int nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); // Timer management struct Timer_t { struct timeval start_m; struct timeval end_m; }; void createTimer(unsigned int * timer) { unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t)); memset(ptr, 0, sizeof(struct Timer_t)); *timer = (unsigned int)(unsigned long long) ptr; } void startTimer(unsigned int ptr) { printf("in timer\n"); gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL); } void stopTimer(unsigned int ptr) { gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL); } float getTimerValue(unsigned int ptr) { Timer_t * timer = (Timer_t*) ptr; if (timer == NULL) { fprintf(stderr, "Uninitialized timer!!!\n"); return 0.0; } if (timer->end_m.tv_sec == 0) { stopTimer(ptr); } return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) + (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec))); } void deleteTimer(unsigned int ptr) { free((Timer_t *)ptr); } extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; getReferenceString(fromFile, &(ref->str), &(ref->len)); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); free(ref->h_ref_tex_array); free(ref->aux_data); ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void 
printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, QuerySet* queries, MatchResults* matches, MUMMERGPU_OPTIONS options, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; // break out options here ctx->on_cpu = options & ON_CPU; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int height = 0; unsigned int width = 0; AuxiliaryNodeData* aux_data = NULL; int num_nodes; ref->len = end - begin + 3; ref->str = (char*)malloc(ref->len); ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &height, &aux_data, &num_nodes, NULL, NULL); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = width; ref->tex_height = height; ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = width * height * (sizeof(PixelOfNode) + sizeof(PixelOfChildren)); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; ref->h_ref_tex_array = (char *) malloc(numrows*refpitch); ref->bytes_on_board += numrows*refpitch; int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_tex_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); int bigy = i / (block_dim); y = bigy*blocksize+bigx%blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_tex_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } } void loadReferenceTexture(MatchContext* ctx) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; hipChannelFormatDesc refTextureDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindSigned); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); fprintf(stderr, "allocating reference texture\n"); CUDA_SAFE_CALL(hipMallocArray( (hipArray**)(&ref->d_ref_tex_array), &refTextureDesc, ref->pitch, numrows)); //ref->bytes_on_board += ref->pitch * numrows; CUDA_SAFE_CALL(hipMemcpyToArray( (hipArray*)(ref->d_ref_tex_array), 0, 0, ref->h_ref_tex_array, numrows*ref->pitch, hipMemcpyHostToDevice)); reftex.addressMode[0] = hipAddressModeClamp; reftex.addressMode[1] = hipAddressModeClamp; reftex.filterMode = hipFilterModePoint; reftex.normalized = 
false; CUDA_SAFE_CALL(hipBindTextureToArray( reftex, (hipArray*)ref->d_ref_tex_array, refTextureDesc)); //stopTimer(toboardtimer); //ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ref->d_ref_tex_array = NULL; } fprintf(stderr,"done\n"); } void unloadReferenceTexture(Reference* ref) { CUDA_SAFE_CALL(hipUnbindTexture( reftex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_ref_tex_array))); ref->d_ref_tex_array = NULL; } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx) { Reference* ref = ctx->ref; //ref->bytes_on_board = 0; loadReferenceTexture(ctx); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); hipChannelFormatDesc nodeTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); CUDA_SAFE_CALL( hipMallocArray( (hipArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_height )); //ref->bytes_on_board += ref->tex_width * ref->tex_height * (sizeof(PixelOfNode)); CUDA_SAFE_CALL( hipMemcpyToArray( (hipArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_height * sizeof(PixelOfNode), hipMemcpyHostToDevice)); nodetex.addressMode[0] = hipAddressModeClamp; nodetex.addressMode[1] = hipAddressModeClamp; nodetex.filterMode = hipFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates CUDA_SAFE_CALL( hipBindTextureToArray( nodetex, (hipArray*)ref->d_node_tex_array, nodeTextureDesc)); hipChannelFormatDesc childrenTextureDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindUnsigned); CUDA_SAFE_CALL( hipMallocArray( (hipArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_height )); //ref->bytes_on_board += ref->tex_width * ref->tex_height * sizeof(PixelOfNode); CUDA_SAFE_CALL( hipMemcpyToArray((hipArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_height * sizeof(PixelOfChildren), hipMemcpyHostToDevice)); childrentex.addressMode[0] = hipAddressModeClamp; childrentex.addressMode[1] = hipAddressModeClamp; childrentex.filterMode = hipFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates CUDA_SAFE_CALL( hipBindTextureToArray( childrentex, (hipArray*)(ref->d_children_tex_array), childrenTextureDesc)); fprintf(stderr, "done\n"); //stopTimer(toboardtimer); //ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void unloadReference(MatchContext* ctx) { Reference* ref = ctx->ref; CUDA_SAFE_CALL(hipUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_node_tex_array))); ref->d_node_tex_array = NULL; CUDA_SAFE_CALL(hipUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(hipFreeArray((hipArray*)(ref->d_children_tex_array))); ref->d_children_tex_array = NULL; unloadReferenceTexture(ctx->ref); } void loadQueries(MatchContext* ctx,hipStream_t stream_app) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "loadQueries on GPU: Allocating device memory for queries...\n"); CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_tex_array, queries->texlen)); queries->bytes_on_board += queries->texlen; 
CUDA_SAFE_CALL( hipMemcpyAsync((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, hipMemcpyHostToDevice, stream_app)); CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int))); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpyAsync((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), hipMemcpyHostToDevice, stream_app)); CUDA_SAFE_CALL( hipMalloc((void**) &queries->d_lengths_array, numQueries * sizeof(int))); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( hipMemcpyAsync((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), hipMemcpyHostToDevice, stream_app)); fprintf(stderr, "loadQueries on GPU: allocated %ld bytes done\n", 2 * numQueries*sizeof(int) + queries->texlen); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, "loadQueries on CPU: allocated %ld bytes done\n", numQueries*sizeof(int) + queries->texlen); } //stopTimer(toboardtimer); //ctx->statistics.t_to_board += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(hipFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(hipFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); int match_length = ctx->min_match_length; unsigned int numCoords = 0; numCoords = ctx->queries->texlen - numQueries * (match_length + 1); ctx->results.numCoords = numCoords; fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",numQueries, numCoords*sizeof(MatchCoord) ); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); ctx->results.bytes_on_board = 0; CUDA_SAFE_CALL( hipMalloc( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord))); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( hipMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); //stopTimer(toboardtimer); //ctx->statistics.t_to_board += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(hipFree(ctx->results.d_match_coords)); ctx->results.bytes_on_board = 0; } void freeResults(MatchContext* ctx, ReferencePage pages[], unsigned int num_pages) { for (int i = 0; i < num_pages; ++i) { free(pages[i].results.h_match_coords); } } void transferResultsFromDevice(MatchContext* ctx,hipStream_t stream_app) { if (!ctx->on_cpu) { //unsigned int fromboardtimer = 0; //createTimer(&fromboardtimer); //startTimer(fromboardtimer); CUDA_SAFE_CALL(hipMemcpyAsync(ctx->results.h_match_coords, ctx->results.d_match_coords, ctx->results.numCoords * sizeof(MatchCoord), hipMemcpyDeviceToHost, stream_app) ); //stopTimer(fromboardtimer); //ctx->statistics.t_from_board += getTimerValue(fromboardtimer); //deleteTimer(fromboardtimer); } } int flushOutput(); int addToBuffer(char* string); inline int match_coord_addrs(int qryid, int qry_addrs, 
int match_length) { return qry_addrs - qryid * (match_length + 1); } #define MAX_QUERY_LEN 8192 struct packed_slot { unsigned short page; unsigned short qpos; MatchCoord coord; }; struct packed_slot_array { packed_slot* slots; unsigned int num_slots; }; void addPackedOutput(MatchContext* ctx, packed_slot_array** curr_output, packed_slot_array slot_array[]) { unsigned int numQueries = ctx->queries->count; if (*curr_output == NULL) { *curr_output = slot_array; } else { for (int i = 0; i < numQueries; i++) { if (slot_array[i].num_slots) { //packed_slot_array* slots = &(slot_array[i]); (*curr_output)[i].slots = (packed_slot*)realloc((*curr_output)[i].slots, ((*curr_output)[i].num_slots + slot_array[i].num_slots) * sizeof(packed_slot)); memcpy((*curr_output)[i].slots + (*curr_output)[i].num_slots, slot_array[i].slots, slot_array[i].num_slots * sizeof(packed_slot)); (*curr_output)[i].num_slots += slot_array[i].num_slots; free(slot_array[i].slots); } } free(slot_array); } } char numbuffer[32]; void printRCSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) { char* h_tex_array = ctx->queries->h_tex_array; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int qrylen = ctx->queries->h_lengths_array[qry]; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + qry)); addToBuffer(" Reverse"); if (ctx->show_query_length) { addToBuffer(" Len = "); sprintf(numbuffer, "%d", qrylen); addToBuffer(numbuffer); } addToBuffer("\n"); for (int j = 0; j < slots->num_slots; ++j) { packed_slot slot = slots->slots[j]; if (slot.coord.edge_match_length & FRMASK) { printAlignments(ctx->full_ref, &(pages[slot.page]), h_tex_array + h_addrs_tex_array[qry], qrylen, slot.coord.node, slot.qpos, (slot.coord.edge_match_length & FRUMASK), ctx->min_match_length, 1, ctx->forwardcoordinates); } } } int FOO; void printForwardSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) { char* h_tex_array = ctx->queries->h_tex_array; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int qrylen = ctx->queries->h_lengths_array[qry]; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + qry)); if (ctx->show_query_length) { addToBuffer(" Len = "); sprintf(numbuffer, "%d", qrylen); addToBuffer(numbuffer); } addToBuffer("\n"); for (int j = 0; j < slots->num_slots; ++j) { packed_slot slot = slots->slots[j]; if (!(slot.coord.edge_match_length & FRMASK)) { printAlignments(ctx->full_ref, &(pages[slot.page]), h_tex_array + h_addrs_tex_array[qry], qrylen, slot.coord.node, slot.qpos, slot.coord.edge_match_length, ctx->min_match_length, 0, ctx->forwardcoordinates); } } FOO += slots->num_slots; } void printPackedResults(MatchContext* ctx, ReferencePage pages[], packed_slot_array slot_array[]) { unsigned int numQueries = ctx->queries->count; FOO = 0; for (int qry = 0; qry < numQueries; qry++) { packed_slot_array* slots = &(slot_array[qry]); if (ctx->reverse) { printRCSlots(ctx, pages, qry, slots); } else { printForwardSlots(ctx, pages, qry, slots); if (ctx->forwardreverse) { printRCSlots(ctx, pages, qry, slots); } } } printf("FOO = %d\n", FOO); flushOutput(); } void packSlots(MatchContext* ctx, MatchResults* results, unsigned int page_num, packed_slot_array** slot_arrays, bool rc) { unsigned int numQueries = ctx->queries->count; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int match_length = ctx->min_match_length; *slot_arrays = (packed_slot_array*)calloc(numQueries, sizeof(packed_slot_array)); for (int i = 0; i < numQueries; i++) { int qlen; if (i == 
numQueries - 1) qlen = ctx->queries->texlen - h_addrs_tex_array[i] - match_length; else qlen = h_addrs_tex_array[i + 1] - h_addrs_tex_array[i] - match_length; packed_slot* qslots = (packed_slot*)calloc(qlen, sizeof(packed_slot)); int filled = 0; for (int p = 0; p < qlen; ++p) { MatchCoord* coords = results->h_match_coords; int query_coord_begin = match_coord_addrs(i, h_addrs_tex_array[i], match_length); int query_coord_end = i < numQueries - 1 ? match_coord_addrs(i + 1, h_addrs_tex_array[i + 1], match_length) : results->numCoords; int query_coord = query_coord_begin + p; if ((query_coord < query_coord_end) && (coords[query_coord].node > 1) && (!(coords[query_coord].edge_match_length & FRMASK) == !rc)) { packed_slot s; s.page = page_num; s.qpos = p; s.coord = coords[query_coord]; qslots[filled++] = s; } } if (filled) { packed_slot* pslots = (packed_slot*)calloc(filled, sizeof(packed_slot)); memcpy(pslots, qslots, (filled)*sizeof(packed_slot)); (*slot_arrays)[i].slots = pslots; (*slot_arrays)[i].num_slots = filled; } else { (*slot_arrays)[i].slots = NULL; (*slot_arrays)[i].num_slots = 0; } free(qslots); } } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; size_t queryLen; char** names; //unsigned int queryreadtimer = 0; //createTimer(&queryreadtimer); //startTimer(queryreadtimer); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); //stopTimer(queryreadtimer); //ctx->statistics.t_query_read += getTimerValue(queryreadtimer); //deleteTimer(queryreadtimer); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void writeStatisticsFile(MatchContext* ctx, char* stats_filename) { if (!stats_filename) return; FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); return; } fprintf(f, "Total,%f\n", ctx->statistics.t_total); fprintf(f, "Kernel,%f\n", ctx->statistics.t_kernel); fprintf(f, "Print matches,%f\n", ctx->statistics.t_output); fprintf(f, "Copy queries to GPU,%f\n", ctx->statistics.t_to_board); fprintf(f, "Copy output from GPU,%f\n", ctx->statistics.t_from_board); fprintf(f, "Copy suffix tree to GPU,%f\n", ctx->statistics.t_moving_tree_pages); fprintf(f, "Read queries from disk,%f\n", ctx->statistics.t_query_read); fprintf(f, "Suffix tree constructions,%f\n", ctx->statistics.t_construction); fprintf(f, "Minimum substring length, %d\n", ctx->min_match_length); fprintf(f, "Average query length, %f\n", ctx->statistics.bp_avg_query_length); fclose(f); } int matchSubset(MatchContext* ctx, int query_block_offset, ReferencePage pages[], unsigned int num_pages,hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { loadQueries(ctx, stream_app); packed_slot_array* packed_slots = NULL; for (unsigned int i = 0; i < num_pages; 
++i) { ctx->ref = &(pages[i].ref); loadReference(ctx); loadResultBuffer(ctx); //unsigned int ktimer = 0; //createTimer(&ktimer); unsigned int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries; dim3 dimBlock(blocksize,1,1); dim3 dimGrid(ceil(numQueries/(float)BLOCKSIZE), 1, 1); if (!ctx->on_cpu) { fprintf(stderr,"Using blocks of %d x %d x %d threads\n", dimBlock.x, dimBlock.y, dimBlock.z); fprintf(stderr,"Using a grid of %d x %d x %d blocks\n", dimGrid.x, dimGrid.y, dimBlock.z); fprintf(stderr,"Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); } //startTimer(ktimer); bool alignRC = ctx->reverse; if (ctx->on_cpu) { if (alignRC) { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } else { pthread_mutex_lock (mutexapp); if (alignRC) { hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0 ,stream_app, ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { hipLaunchKernelGGL(( mummergpuKernel), dim3(dimGrid), dim3(dimBlock), 0 , stream_app, ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } pthread_mutex_unlock (mutexapp); } if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); // check if kernel execution generated an error hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //stopTimer(ktimer); //float ktime = getTimerValue(ktimer); //ctx->statistics.t_kernel += ktime; // fprintf(stderr,"kernel time= %f\n", ktime); //deleteTimer(ktimer); transferResultsFromDevice(ctx, stream_app); pages[i].results = ctx->results; packed_slot_array* packed; packSlots(ctx, &(pages[i].results), i, &packed, ctx->reverse); addPackedOutput(ctx, &packed_slots, packed); // now compute the reverse matches. 
if (ctx->forwardreverse) { //unsigned int rctimer = 0; //createTimer(&rctimer); //startTimer(rctimer); if (ctx->on_cpu) { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { pthread_mutex_lock (mutexapp); hipLaunchKernelGGL(( mummergpuRCKernel), dim3(dimGrid), dim3(dimBlock), 0 , stream_app, ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); else hipDeviceSynchronize(); } //stopTimer(rctimer); //float rctime = getTimerValue(rctimer); //ctx->statistics.t_kernel += rctime; //fprintf(stderr,"rc kernel time= %f\n", rctime); //deleteTimer(rctimer); transferResultsFromDevice(ctx, stream_app); if(flag) cutilSafeCall( hipStreamSynchronize(stream_app) ); pages[i].results = ctx->results; packed_slot_array* packed; packSlots(ctx, &(pages[i].results), i, &packed, 1); addPackedOutput(ctx, &packed_slots, packed); } free(pages[i].results.h_match_coords); pages[i].results.h_match_coords = NULL; unloadReference(ctx); unloadResultBuffer(ctx); } //unsigned int otimer = 0; //createTimer(&otimer); //startTimer(otimer); printPackedResults(ctx, pages, packed_slots); //stopTimer(otimer); //ctx->statistics.t_output += getTimerValue(otimer); //deleteTimer(otimer); for (int i = 0; i < ctx->queries->count; ++i) { free(packed_slots[i].slots); } free(packed_slots); unloadQueries(ctx); return 0; } #define BREATHING_ROOM (64 * 1024 * 1024) #define BASES_PER_TREE_PAGE 7500000 #define CHUMP_CHANGE 1500000 extern "C" int matchQueries(MatchContext* ctx,hipStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { assert(sizeof(struct PixelOfNode) == sizeof(ulong4)); assert(sizeof(struct PixelOfChildren) == sizeof(ulong4)); ctx->statistics.t_kernel = 0.0; ctx->statistics.t_output = 0.0; ctx->statistics.t_to_board = 0.0; ctx->statistics.t_from_board = 0.0; ctx->statistics.t_moving_tree_pages = 0.0; ctx->statistics.t_query_read = 0.0; ctx->statistics.t_total = 0.0; ctx->statistics.t_construction = 0.0; ctx->statistics.bp_avg_query_length = 0.0; //unsigned int ttimer = 0; //createTimer(&ttimer); //startTimer(ttimer); //unsigned int ctimer = 0; //createTimer(&ctimer); //startTimer(ctimer); unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? 
BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = bases_in_ref / page_size; ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages, sizeof(ReferencePage)); unsigned int page_overlap = MAX_QUERY_LEN + 1; pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end); for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end); } //stopTimer(ctimer); //ctx->statistics.t_construction += getTimerValue(ctimer); //deleteTimer(ctimer); hipDeviceProp_t props; if (!ctx->on_cpu) { int deviceCount = 0; hipGetDeviceCount(&deviceCount); if (deviceCount != 1) { //fprintf(stderr, "Fatal error: no CUDA-capable device found, exiting\n"); //return -1; } hipGetDeviceProperties(&props, 0); fprintf(stderr, "Running under CUDA %d.%d\n", props.major, props.minor); fprintf(stderr, "CUDA device has %d bytes of memory\n", props.totalGlobalMem); } else { props.totalGlobalMem = 804585472; // pretend we are on a 8800 GTX } size_t mem_avail = 0; for (int i = 0; i < num_reference_pages; ++i) { mem_avail = max((unsigned int)pages[i].ref.bytes_on_board, (unsigned int)mem_avail); } mem_avail = props.totalGlobalMem - mem_avail; fprintf(stderr, "There are %d bytes left on the board\n", mem_avail); mem_avail -= BREATHING_ROOM; while (getQueryBlock(ctx, mem_avail)) { pthread_mutex_unlock (mutexapp); matchSubset(ctx, 0, pages, num_reference_pages,stream_app,mutexapp,flag); ctx->statistics.bp_avg_query_length = ctx->queries->texlen/(float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); //hipDeviceReset(); } for (int i = 0; i < num_reference_pages; ++i) { destroyReference(&(pages[i].ref)); } free(pages); //stopTimer(ttimer); //ctx->statistics.t_total += getTimerValue(ttimer); //deleteTimer(ttimer); writeStatisticsFile(ctx, ctx->stats_file); return 0; }
2341a0e9105914e2d09927fb2ce678612c277ebf.cu
// Includes, system #define ulong4 uint4 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include "sys/time.h" // includes, kernels #include "common.cu" #include "mummergpu.h" #include "mummergpu_kernel.cu" #define BLOCKSIZE 256 /*#define CUDA_SAFE_CALL( call) do { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } while (0)*/ //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); extern "C" void computeGold(MatchResults* results, char* refstr, char* queries, int* queryAddrs, int* queryLengths, PixelOfNode* nodeTexture, PixelOfChildren* childrenTexture, int numQueries, int mismatch_length, int rc); extern "C" void getReferenceString(const char * filename, char** refstr, size_t* reflen); extern "C" void createTreeTexture(const char * filename, PixelOfNode** nodeTexture, PixelOfChildren** childrenTexture, unsigned int* width, unsigned int* height, AuxiliaryNodeData** aux_data, int* num_nodes, const char * dotfilename, const char * texfilename); extern "C" void getQueriesTexture(int qfile, char** queryTexture, size_t* queryLength, int** queryAddrs, char*** queryNames, int** queryLengths, unsigned int* numQueries, size_t device_memory_avail, int min_match_length, bool rc); void printAlignments(char* ref, ReferencePage* page, char* query, int qrylen, int nodeid, int qrypos, int edge_depth, int min_match, bool rc, bool forwardcoordinates); int countLeafNodes(int nodeid); // Timer management struct Timer_t { struct timeval start_m; struct timeval end_m; }; void createTimer(unsigned int * timer) { unsigned int * ptr = (unsigned int *) malloc(sizeof(struct Timer_t)); memset(ptr, 0, sizeof(struct Timer_t)); *timer = (unsigned int)(unsigned long long) ptr; } void startTimer(unsigned int ptr) { printf("in timer\n"); gettimeofday(&(((struct Timer_t *)ptr)->start_m), NULL); } void stopTimer(unsigned int ptr) { gettimeofday(&(((struct Timer_t *)ptr)->end_m), NULL); } float getTimerValue(unsigned int ptr) { Timer_t * timer = (Timer_t*) ptr; if (timer == NULL) { fprintf(stderr, "Uninitialized timer!!!\n"); return 0.0; } if (timer->end_m.tv_sec == 0) { stopTimer(ptr); } return (float) (1000.0 * (timer->end_m.tv_sec - timer->start_m.tv_sec) + (0.001 * (timer->end_m.tv_usec - timer->start_m.tv_usec))); } void deleteTimer(unsigned int ptr) { free((Timer_t *)ptr); } extern "C" int createReference(const char* fromFile, Reference* ref) { if (!fromFile || !ref) return -1; getReferenceString(fromFile, &(ref->str), &(ref->len)); return 0; } extern "C" int destroyReference(Reference* ref) { free(ref->h_node_tex_array); free(ref->h_children_tex_array); free(ref->str); free(ref->h_ref_tex_array); free(ref->aux_data); ref->str = NULL; ref->len = 0; return 0; } extern "C" int createQuerySet(const char* fromFile, QuerySet* queries) { fprintf(stderr, "Opening %s...\n", fromFile); int qfile = open(fromFile, O_RDONLY); if (qfile == -1) { fprintf(stderr, "Can't open %s: %d\n", fromFile, errno); exit (1); } queries->qfile = qfile; return 0; } extern "C" int destroyQuerySet(QuerySet* queries) { if (queries->qfile) close(queries->qfile); return 0; } extern "C" void printStringForError(int err) { } extern "C" int createMatchContext(Reference* ref, 
QuerySet* queries, MatchResults* matches, MUMMERGPU_OPTIONS options, int min_match_length, char* stats_file, bool reverse, bool forwardreverse, bool forwardcoordinates, bool showQueryLength, MatchContext* ctx) { ctx->queries = queries; ctx->ref = ref; ctx->full_ref = ref->str; ctx->full_ref_len = ref->len; // break out options here ctx->on_cpu = options & ON_CPU; ctx->min_match_length = min_match_length; ctx->stats_file = stats_file; ctx->reverse = reverse; ctx->forwardreverse = forwardreverse; ctx->forwardcoordinates = forwardcoordinates; ctx->show_query_length = showQueryLength; return 0; } extern "C" int destroyMatchContext(MatchContext* ctx) { free(ctx->full_ref); //destroyReference(ctx->ref); destroyQuerySet(ctx->queries); return 0; } void buildReferenceTexture(Reference* ref, char* full_ref, size_t begin, size_t end) { fprintf(stderr, "Building reference texture...\n"); PixelOfNode* nodeTexture = NULL; PixelOfChildren * childrenTexture = NULL; unsigned int height = 0; unsigned int width = 0; AuxiliaryNodeData* aux_data = NULL; int num_nodes; ref->len = end - begin + 3; ref->str = (char*)malloc(ref->len); ref->str[0] = 's'; strncpy(ref->str + 1, full_ref + begin, ref->len - 3); strcpy(ref->str + ref->len - 2, "$"); createTreeTexture(ref->str, &nodeTexture, &childrenTexture, &width, &height, &aux_data, &num_nodes, NULL, NULL); ref->h_node_tex_array = nodeTexture; ref->h_children_tex_array = childrenTexture; ref->tex_width = width; ref->tex_height = height; ref->aux_data = aux_data; ref->num_nodes = num_nodes; ref->bytes_on_board = width * height * (sizeof(PixelOfNode) + sizeof(PixelOfChildren)); unsigned int refpitch = ref->pitch = 65536; int numrows = ceil(ref->len / ((float)refpitch)); int blocksize = 4; numrows += blocksize; ref->h_ref_tex_array = (char *) malloc(numrows*refpitch); ref->bytes_on_board += numrows*refpitch; int z_max = numrows * refpitch; for (int z = 0; z < z_max; z++) { ref->h_ref_tex_array[z] = 'Z'; } int x, y; int maxx = 0, maxy = 0; size_t reflen = ref->len; char* refstr = ref->str; int block_dim = refpitch * blocksize; for (int i = 0; i < reflen; i++) { int bigx = i % (block_dim); int bigy = i / (block_dim); y = bigy*blocksize+bigx%blocksize; x = bigx / blocksize; // printf("%d: (%d,%d)=%c\n", i, x, y, refstr[i]); assert(x < refpitch); assert(y < numrows); ref->h_ref_tex_array[y*refpitch+x] = refstr[i]; if (x > maxx) { maxx = x; } if (y > maxy) { maxy = y; } } if ((maxx >= refpitch) || (maxy >= numrows)) { fprintf(stderr, "ERROR: maxx: %d refpitch: %d, maxy: %d numrows: %d\n", maxx, refpitch, maxy, numrows); exit(1); } } void loadReferenceTexture(MatchContext* ctx) { Reference* ref = ctx->ref; int numrows = ceil(ref->len / ((float)ref->pitch)); int blocksize = 4; numrows += blocksize; cudaChannelFormatDesc refTextureDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindSigned); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); fprintf(stderr, "allocating reference texture\n"); CUDA_SAFE_CALL(cudaMallocArray( (cudaArray**)(&ref->d_ref_tex_array), &refTextureDesc, ref->pitch, numrows)); //ref->bytes_on_board += ref->pitch * numrows; CUDA_SAFE_CALL(cudaMemcpyToArray( (cudaArray*)(ref->d_ref_tex_array), 0, 0, ref->h_ref_tex_array, numrows*ref->pitch, cudaMemcpyHostToDevice)); reftex.addressMode[0] = cudaAddressModeClamp; reftex.addressMode[1] = cudaAddressModeClamp; reftex.filterMode = cudaFilterModePoint; reftex.normalized = false; CUDA_SAFE_CALL(cudaBindTextureToArray( reftex, 
(cudaArray*)ref->d_ref_tex_array, refTextureDesc)); //stopTimer(toboardtimer); //ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ref->d_ref_tex_array = NULL; } fprintf(stderr,"done\n"); } void unloadReferenceTexture(Reference* ref) { CUDA_SAFE_CALL(cudaUnbindTexture( reftex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_ref_tex_array))); ref->d_ref_tex_array = NULL; } //loads a tree and text for [begin, end) in the reference void loadReference(MatchContext* ctx) { Reference* ref = ctx->ref; //ref->bytes_on_board = 0; loadReferenceTexture(ctx); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); cudaChannelFormatDesc nodeTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_SAFE_CALL( cudaMallocArray( (cudaArray**)(&ref->d_node_tex_array), &nodeTextureDesc, ref->tex_width, ref->tex_height )); //ref->bytes_on_board += ref->tex_width * ref->tex_height * (sizeof(PixelOfNode)); CUDA_SAFE_CALL( cudaMemcpyToArray( (cudaArray*)(ref->d_node_tex_array), 0, 0, ref->h_node_tex_array, ref->tex_width * ref->tex_height * sizeof(PixelOfNode), cudaMemcpyHostToDevice)); nodetex.addressMode[0] = cudaAddressModeClamp; nodetex.addressMode[1] = cudaAddressModeClamp; nodetex.filterMode = cudaFilterModePoint; nodetex.normalized = false; // access with normalized texture coordinates CUDA_SAFE_CALL( cudaBindTextureToArray( nodetex, (cudaArray*)ref->d_node_tex_array, nodeTextureDesc)); cudaChannelFormatDesc childrenTextureDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindUnsigned); CUDA_SAFE_CALL( cudaMallocArray( (cudaArray**)(&ref->d_children_tex_array), &childrenTextureDesc, ref->tex_width, ref->tex_height )); //ref->bytes_on_board += ref->tex_width * ref->tex_height * sizeof(PixelOfNode); CUDA_SAFE_CALL( cudaMemcpyToArray((cudaArray*)(ref->d_children_tex_array), 0, 0, ref->h_children_tex_array, ref->tex_width * ref->tex_height * sizeof(PixelOfChildren), cudaMemcpyHostToDevice)); childrentex.addressMode[0] = cudaAddressModeClamp; childrentex.addressMode[1] = cudaAddressModeClamp; childrentex.filterMode = cudaFilterModePoint; childrentex.normalized = false; // access with normalized texture coordinates CUDA_SAFE_CALL( cudaBindTextureToArray( childrentex, (cudaArray*)(ref->d_children_tex_array), childrenTextureDesc)); fprintf(stderr, "done\n"); //stopTimer(toboardtimer); //ctx->statistics.t_moving_tree_pages += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ref->d_node_tex_array = NULL; ref->d_children_tex_array = NULL; } } void unloadReference(MatchContext* ctx) { Reference* ref = ctx->ref; CUDA_SAFE_CALL(cudaUnbindTexture( nodetex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_node_tex_array))); ref->d_node_tex_array = NULL; CUDA_SAFE_CALL(cudaUnbindTexture( childrentex ) ); CUDA_SAFE_CALL(cudaFreeArray((cudaArray*)(ref->d_children_tex_array))); ref->d_children_tex_array = NULL; unloadReferenceTexture(ctx->ref); } void loadQueries(MatchContext* ctx,cudaStream_t stream_app) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); QuerySet* queries = ctx->queries; queries->bytes_on_board = 0; unsigned int numQueries = queries->count; if (!ctx->on_cpu) { fprintf(stderr, "loadQueries on GPU: Allocating device memory for queries...\n"); CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_tex_array, queries->texlen)); queries->bytes_on_board += queries->texlen; CUDA_SAFE_CALL( 
cudaMemcpyAsync((void*) queries->d_tex_array, queries->h_tex_array + queries->h_addrs_tex_array[0], queries->texlen, cudaMemcpyHostToDevice, stream_app)); CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_addrs_tex_array, numQueries * sizeof(int))); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpyAsync((void*) queries->d_addrs_tex_array, queries->h_addrs_tex_array, numQueries * sizeof(int), cudaMemcpyHostToDevice, stream_app)); CUDA_SAFE_CALL( cudaMalloc((void**) &queries->d_lengths_array, numQueries * sizeof(int))); queries->bytes_on_board += numQueries * sizeof(int); CUDA_SAFE_CALL( cudaMemcpyAsync((void*) queries->d_lengths_array, queries->h_lengths_array, numQueries * sizeof(int), cudaMemcpyHostToDevice, stream_app)); fprintf(stderr, "loadQueries on GPU: allocated %ld bytes done\n", 2 * numQueries*sizeof(int) + queries->texlen); } else { queries->d_addrs_tex_array = NULL; queries->d_tex_array = NULL; queries->d_lengths_array = NULL; fprintf(stderr, "loadQueries on CPU: allocated %ld bytes done\n", numQueries*sizeof(int) + queries->texlen); } //stopTimer(toboardtimer); //ctx->statistics.t_to_board += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } void unloadQueries(MatchContext* ctx) { QuerySet* queries = ctx->queries; CUDA_SAFE_CALL(cudaFree(queries->d_tex_array)); queries->d_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_addrs_tex_array)); queries->d_addrs_tex_array = NULL; CUDA_SAFE_CALL(cudaFree(queries->d_lengths_array)); queries->d_lengths_array = NULL; queries->bytes_on_board = 0; } void loadResultBuffer(MatchContext* ctx) { unsigned int numQueries = ctx->queries->count; assert (numQueries); int match_length = ctx->min_match_length; unsigned int numCoords = 0; numCoords = ctx->queries->texlen - numQueries * (match_length + 1); ctx->results.numCoords = numCoords; fprintf(stderr, "Allocating result array for %d queries (%d bytes) ...",numQueries, numCoords*sizeof(MatchCoord) ); ctx->results.h_match_coords = (MatchCoord*) calloc( numCoords, sizeof(MatchCoord)); if (!ctx->on_cpu) { //unsigned int toboardtimer = 0; //createTimer(&toboardtimer); //startTimer(toboardtimer); ctx->results.bytes_on_board = 0; CUDA_SAFE_CALL( cudaMalloc( (void**) &ctx->results.d_match_coords, numCoords * sizeof(MatchCoord))); ctx->results.bytes_on_board += numCoords * sizeof(MatchCoord); CUDA_SAFE_CALL( cudaMemset( (void*)ctx->results.d_match_coords, 0, numCoords * sizeof(MatchCoord))); //stopTimer(toboardtimer); //ctx->statistics.t_to_board += getTimerValue(toboardtimer); //deleteTimer(toboardtimer); } else { ctx->results.d_match_coords = NULL; } fprintf(stderr, "done\n"); } void unloadResultBuffer(MatchContext* ctx) { CUDA_SAFE_CALL(cudaFree(ctx->results.d_match_coords)); ctx->results.bytes_on_board = 0; } void freeResults(MatchContext* ctx, ReferencePage pages[], unsigned int num_pages) { for (int i = 0; i < num_pages; ++i) { free(pages[i].results.h_match_coords); } } void transferResultsFromDevice(MatchContext* ctx,cudaStream_t stream_app) { if (!ctx->on_cpu) { //unsigned int fromboardtimer = 0; //createTimer(&fromboardtimer); //startTimer(fromboardtimer); CUDA_SAFE_CALL(cudaMemcpyAsync(ctx->results.h_match_coords, ctx->results.d_match_coords, ctx->results.numCoords * sizeof(MatchCoord), cudaMemcpyDeviceToHost, stream_app) ); //stopTimer(fromboardtimer); //ctx->statistics.t_from_board += getTimerValue(fromboardtimer); //deleteTimer(fromboardtimer); } } int flushOutput(); int addToBuffer(char* string); inline int match_coord_addrs(int qryid, int qry_addrs, 
int match_length) { return qry_addrs - qryid * (match_length + 1); } #define MAX_QUERY_LEN 8192 struct packed_slot { unsigned short page; unsigned short qpos; MatchCoord coord; }; struct packed_slot_array { packed_slot* slots; unsigned int num_slots; }; void addPackedOutput(MatchContext* ctx, packed_slot_array** curr_output, packed_slot_array slot_array[]) { unsigned int numQueries = ctx->queries->count; if (*curr_output == NULL) { *curr_output = slot_array; } else { for (int i = 0; i < numQueries; i++) { if (slot_array[i].num_slots) { //packed_slot_array* slots = &(slot_array[i]); (*curr_output)[i].slots = (packed_slot*)realloc((*curr_output)[i].slots, ((*curr_output)[i].num_slots + slot_array[i].num_slots) * sizeof(packed_slot)); memcpy((*curr_output)[i].slots + (*curr_output)[i].num_slots, slot_array[i].slots, slot_array[i].num_slots * sizeof(packed_slot)); (*curr_output)[i].num_slots += slot_array[i].num_slots; free(slot_array[i].slots); } } free(slot_array); } } char numbuffer[32]; void printRCSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) { char* h_tex_array = ctx->queries->h_tex_array; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int qrylen = ctx->queries->h_lengths_array[qry]; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + qry)); addToBuffer(" Reverse"); if (ctx->show_query_length) { addToBuffer(" Len = "); sprintf(numbuffer, "%d", qrylen); addToBuffer(numbuffer); } addToBuffer("\n"); for (int j = 0; j < slots->num_slots; ++j) { packed_slot slot = slots->slots[j]; if (slot.coord.edge_match_length & FRMASK) { printAlignments(ctx->full_ref, &(pages[slot.page]), h_tex_array + h_addrs_tex_array[qry], qrylen, slot.coord.node, slot.qpos, (slot.coord.edge_match_length & FRUMASK), ctx->min_match_length, 1, ctx->forwardcoordinates); } } } int FOO; void printForwardSlots(MatchContext * ctx, ReferencePage pages[], int qry, packed_slot_array * slots) { char* h_tex_array = ctx->queries->h_tex_array; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int qrylen = ctx->queries->h_lengths_array[qry]; addToBuffer("> "); addToBuffer(*(ctx->queries->h_names + qry)); if (ctx->show_query_length) { addToBuffer(" Len = "); sprintf(numbuffer, "%d", qrylen); addToBuffer(numbuffer); } addToBuffer("\n"); for (int j = 0; j < slots->num_slots; ++j) { packed_slot slot = slots->slots[j]; if (!(slot.coord.edge_match_length & FRMASK)) { printAlignments(ctx->full_ref, &(pages[slot.page]), h_tex_array + h_addrs_tex_array[qry], qrylen, slot.coord.node, slot.qpos, slot.coord.edge_match_length, ctx->min_match_length, 0, ctx->forwardcoordinates); } } FOO += slots->num_slots; } void printPackedResults(MatchContext* ctx, ReferencePage pages[], packed_slot_array slot_array[]) { unsigned int numQueries = ctx->queries->count; FOO = 0; for (int qry = 0; qry < numQueries; qry++) { packed_slot_array* slots = &(slot_array[qry]); if (ctx->reverse) { printRCSlots(ctx, pages, qry, slots); } else { printForwardSlots(ctx, pages, qry, slots); if (ctx->forwardreverse) { printRCSlots(ctx, pages, qry, slots); } } } printf("FOO = %d\n", FOO); flushOutput(); } void packSlots(MatchContext* ctx, MatchResults* results, unsigned int page_num, packed_slot_array** slot_arrays, bool rc) { unsigned int numQueries = ctx->queries->count; int* h_addrs_tex_array = ctx->queries->h_addrs_tex_array; int match_length = ctx->min_match_length; *slot_arrays = (packed_slot_array*)calloc(numQueries, sizeof(packed_slot_array)); for (int i = 0; i < numQueries; i++) { int qlen; if (i == 
numQueries - 1) qlen = ctx->queries->texlen - h_addrs_tex_array[i] - match_length; else qlen = h_addrs_tex_array[i + 1] - h_addrs_tex_array[i] - match_length; packed_slot* qslots = (packed_slot*)calloc(qlen, sizeof(packed_slot)); int filled = 0; for (int p = 0; p < qlen; ++p) { MatchCoord* coords = results->h_match_coords; int query_coord_begin = match_coord_addrs(i, h_addrs_tex_array[i], match_length); int query_coord_end = i < numQueries - 1 ? match_coord_addrs(i + 1, h_addrs_tex_array[i + 1], match_length) : results->numCoords; int query_coord = query_coord_begin + p; if ((query_coord < query_coord_end) && (coords[query_coord].node > 1) && (!(coords[query_coord].edge_match_length & FRMASK) == !rc)) { packed_slot s; s.page = page_num; s.qpos = p; s.coord = coords[query_coord]; qslots[filled++] = s; } } if (filled) { packed_slot* pslots = (packed_slot*)calloc(filled, sizeof(packed_slot)); memcpy(pslots, qslots, (filled)*sizeof(packed_slot)); (*slot_arrays)[i].slots = pslots; (*slot_arrays)[i].num_slots = filled; } else { (*slot_arrays)[i].slots = NULL; (*slot_arrays)[i].num_slots = 0; } free(qslots); } } int getQueryBlock(MatchContext* ctx, size_t device_mem_avail) { QuerySet* queries = ctx->queries; char * queryTex = NULL; int* queryAddrs = NULL; int* queryLengths = NULL; unsigned int numQueries; size_t queryLen; char** names; //unsigned int queryreadtimer = 0; //createTimer(&queryreadtimer); //startTimer(queryreadtimer); getQueriesTexture(queries->qfile, &queryTex, &queryLen, &queryAddrs, &names, &queryLengths, &numQueries, device_mem_avail, ctx->min_match_length, ctx->reverse || ctx->forwardreverse); //stopTimer(queryreadtimer); //ctx->statistics.t_query_read += getTimerValue(queryreadtimer); //deleteTimer(queryreadtimer); queries->h_tex_array = queryTex; queries->count = numQueries; queries->h_addrs_tex_array = queryAddrs; queries->texlen = queryLen; queries->h_names = names; queries->h_lengths_array = queryLengths; return numQueries; } void destroyQueryBlock(QuerySet* queries) { free(queries->h_tex_array); queries->h_tex_array = NULL; for (int i = 0; i < queries->count; ++i) free(queries->h_names[i]); free(queries->h_names); queries->count = 0; queries->texlen = 0; free(queries->h_addrs_tex_array); queries->h_addrs_tex_array = NULL; free(queries->h_lengths_array); queries->h_lengths_array = NULL; } void writeStatisticsFile(MatchContext* ctx, char* stats_filename) { if (!stats_filename) return; FILE* f = fopen(stats_filename, "w"); if (!f) { fprintf(stderr, "WARNING: could not open %s for writing\n", stats_filename); return; } fprintf(f, "Total,%f\n", ctx->statistics.t_total); fprintf(f, "Kernel,%f\n", ctx->statistics.t_kernel); fprintf(f, "Print matches,%f\n", ctx->statistics.t_output); fprintf(f, "Copy queries to GPU,%f\n", ctx->statistics.t_to_board); fprintf(f, "Copy output from GPU,%f\n", ctx->statistics.t_from_board); fprintf(f, "Copy suffix tree to GPU,%f\n", ctx->statistics.t_moving_tree_pages); fprintf(f, "Read queries from disk,%f\n", ctx->statistics.t_query_read); fprintf(f, "Suffix tree constructions,%f\n", ctx->statistics.t_construction); fprintf(f, "Minimum substring length, %d\n", ctx->min_match_length); fprintf(f, "Average query length, %f\n", ctx->statistics.bp_avg_query_length); fclose(f); } int matchSubset(MatchContext* ctx, int query_block_offset, ReferencePage pages[], unsigned int num_pages,cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { loadQueries(ctx, stream_app); packed_slot_array* packed_slots = NULL; for (unsigned int i = 0; i < num_pages; 
++i) { ctx->ref = &(pages[i].ref); loadReference(ctx); loadResultBuffer(ctx); //unsigned int ktimer = 0; //createTimer(&ktimer); unsigned int numQueries = ctx->queries->count; int blocksize = (numQueries > BLOCKSIZE) ? BLOCKSIZE : numQueries; dim3 dimBlock(blocksize,1,1); dim3 dimGrid(ceil(numQueries/(float)BLOCKSIZE), 1, 1); if (!ctx->on_cpu) { fprintf(stderr,"Using blocks of %d x %d x %d threads\n", dimBlock.x, dimBlock.y, dimBlock.z); fprintf(stderr,"Using a grid of %d x %d x %d blocks\n", dimGrid.x, dimGrid.y, dimBlock.z); fprintf(stderr,"Memory footprint is:\n\tqueries: %d\n\tref: %d\n\tresults: %d\n", ctx->queries->bytes_on_board, ctx->ref->bytes_on_board, ctx->results.bytes_on_board); } //startTimer(ktimer); bool alignRC = ctx->reverse; if (ctx->on_cpu) { if (alignRC) { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, FORWARD); } } else { pthread_mutex_lock (mutexapp); if (alignRC) { mummergpuRCKernel<<< dimGrid, dimBlock, 0 ,stream_app>>>(ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } else { mummergpuKernel<<< dimGrid, dimBlock, 0 , stream_app>>>(ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); } pthread_mutex_unlock (mutexapp); } if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); // check if kernel execution generated an error cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Kernel execution failed: %s.\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //stopTimer(ktimer); //float ktime = getTimerValue(ktimer); //ctx->statistics.t_kernel += ktime; // fprintf(stderr,"kernel time= %f\n", ktime); //deleteTimer(ktimer); transferResultsFromDevice(ctx, stream_app); pages[i].results = ctx->results; packed_slot_array* packed; packSlots(ctx, &(pages[i].results), i, &packed, ctx->reverse); addPackedOutput(ctx, &packed_slots, packed); // now compute the reverse matches. 
if (ctx->forwardreverse) { //unsigned int rctimer = 0; //createTimer(&rctimer); //startTimer(rctimer); if (ctx->on_cpu) { computeGold(&ctx->results, ctx->ref->str, ctx->queries->h_tex_array, ctx->queries->h_addrs_tex_array, ctx->queries->h_lengths_array, (PixelOfNode*)(ctx->ref->h_node_tex_array), (PixelOfChildren*)(ctx->ref->h_children_tex_array), ctx->queries->count, ctx->min_match_length, REVERSE); } else { pthread_mutex_lock (mutexapp); mummergpuRCKernel<<< dimGrid, dimBlock, 0 , stream_app>>>(ctx->results.d_match_coords, ctx->queries->d_tex_array, ctx->queries->d_addrs_tex_array, ctx->queries->d_lengths_array, numQueries, ctx->min_match_length); pthread_mutex_unlock (mutexapp); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); else cudaThreadSynchronize(); } //stopTimer(rctimer); //float rctime = getTimerValue(rctimer); //ctx->statistics.t_kernel += rctime; //fprintf(stderr,"rc kernel time= %f\n", rctime); //deleteTimer(rctimer); transferResultsFromDevice(ctx, stream_app); if(flag) cutilSafeCall( cudaStreamSynchronize(stream_app) ); pages[i].results = ctx->results; packed_slot_array* packed; packSlots(ctx, &(pages[i].results), i, &packed, 1); addPackedOutput(ctx, &packed_slots, packed); } free(pages[i].results.h_match_coords); pages[i].results.h_match_coords = NULL; unloadReference(ctx); unloadResultBuffer(ctx); } //unsigned int otimer = 0; //createTimer(&otimer); //startTimer(otimer); printPackedResults(ctx, pages, packed_slots); //stopTimer(otimer); //ctx->statistics.t_output += getTimerValue(otimer); //deleteTimer(otimer); for (int i = 0; i < ctx->queries->count; ++i) { free(packed_slots[i].slots); } free(packed_slots); unloadQueries(ctx); return 0; } #define BREATHING_ROOM (64 * 1024 * 1024) #define BASES_PER_TREE_PAGE 7500000 #define CHUMP_CHANGE 1500000 extern "C" int matchQueries(MatchContext* ctx,cudaStream_t stream_app, pthread_mutex_t *mutexapp, bool flag) { assert(sizeof(struct PixelOfNode) == sizeof(ulong4)); assert(sizeof(struct PixelOfChildren) == sizeof(ulong4)); ctx->statistics.t_kernel = 0.0; ctx->statistics.t_output = 0.0; ctx->statistics.t_to_board = 0.0; ctx->statistics.t_from_board = 0.0; ctx->statistics.t_moving_tree_pages = 0.0; ctx->statistics.t_query_read = 0.0; ctx->statistics.t_total = 0.0; ctx->statistics.t_construction = 0.0; ctx->statistics.bp_avg_query_length = 0.0; //unsigned int ttimer = 0; //createTimer(&ttimer); //startTimer(ttimer); //unsigned int ctimer = 0; //createTimer(&ctimer); //startTimer(ctimer); unsigned int bases_in_ref = ctx->full_ref_len - 3; unsigned int page_size = BASES_PER_TREE_PAGE < bases_in_ref ? 
BASES_PER_TREE_PAGE : bases_in_ref; unsigned int num_reference_pages = bases_in_ref / page_size; ReferencePage* pages = (ReferencePage*)calloc(num_reference_pages, sizeof(ReferencePage)); unsigned int page_overlap = MAX_QUERY_LEN + 1; pages[0].begin = 1; pages[0].end = pages[0].begin + page_size + ceil(page_overlap / 2.0) + 1; //the 1 is for the 's' at the beginning pages[0].shadow_left = -1; pages[0].id = 0; buildReferenceTexture(&(pages[0].ref), ctx->full_ref, pages[0].begin, pages[0].end); for (int i = 1; i < num_reference_pages - 1; ++i) { pages[i].begin = pages[i - 1].end - page_overlap; pages[i].end = pages[i].begin + page_size + page_overlap; pages[i - 1].shadow_right = pages[i].begin; pages[i].shadow_left = pages[i-1].end; pages[i].id = i; buildReferenceTexture(&(pages[i].ref), ctx->full_ref, pages[i].begin, pages[i].end); } if (num_reference_pages > 1) { int last_page = num_reference_pages - 1; pages[last_page].begin = pages[last_page - 1].end - page_overlap; pages[last_page].end = ctx->full_ref_len - 1; pages[last_page - 1].shadow_right = pages[last_page].begin; pages[last_page].shadow_right = -1; pages[last_page].shadow_left = pages[last_page - 1].end; pages[last_page].id = last_page; buildReferenceTexture(&(pages[last_page].ref), ctx->full_ref, pages[last_page].begin, pages[last_page].end); } //stopTimer(ctimer); //ctx->statistics.t_construction += getTimerValue(ctimer); //deleteTimer(ctimer); cudaDeviceProp props; if (!ctx->on_cpu) { int deviceCount = 0; cudaGetDeviceCount(&deviceCount); if (deviceCount != 1) { //fprintf(stderr, "Fatal error: no CUDA-capable device found, exiting\n"); //return -1; } cudaGetDeviceProperties(&props, 0); fprintf(stderr, "Running under CUDA %d.%d\n", props.major, props.minor); fprintf(stderr, "CUDA device has %d bytes of memory\n", props.totalGlobalMem); } else { props.totalGlobalMem = 804585472; // pretend we are on a 8800 GTX } size_t mem_avail = 0; for (int i = 0; i < num_reference_pages; ++i) { mem_avail = max((unsigned int)pages[i].ref.bytes_on_board, (unsigned int)mem_avail); } mem_avail = props.totalGlobalMem - mem_avail; fprintf(stderr, "There are %d bytes left on the board\n", mem_avail); mem_avail -= BREATHING_ROOM; while (getQueryBlock(ctx, mem_avail)) { pthread_mutex_unlock (mutexapp); matchSubset(ctx, 0, pages, num_reference_pages,stream_app,mutexapp,flag); ctx->statistics.bp_avg_query_length = ctx->queries->texlen/(float)(ctx->queries->count) - 2; destroyQueryBlock(ctx->queries); //cudaThreadExit(); } for (int i = 0; i < num_reference_pages; ++i) { destroyReference(&(pages[i].ref)); } free(pages); //stopTimer(ttimer); //ctx->statistics.t_total += getTimerValue(ttimer); //deleteTimer(ttimer); writeStatisticsFile(ctx, ctx->stats_file); return 0; }
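// A minimal sketch of the launch-syntax mapping visible in the .hip/.cu pair above:
// hipify rewrites kernel<<<grid, block, shmem, stream>>>(args) as
// hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args) and maps
// cudaThreadSynchronize() to hipDeviceSynchronize(). The kernel, sizes and
// stream handling below are illustrative assumptions, not code taken from mummergpu.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scaleKernel(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;                       // one element per thread
}

int main() {
    const int n = 1024;
    float* d_data = nullptr;
    hipMalloc(&d_data, n * sizeof(float));
    hipMemset(d_data, 0, n * sizeof(float));

    dim3 block(256), grid((n + block.x - 1) / block.x);
    hipStream_t stream;
    hipStreamCreate(&stream);

    // CUDA form:  scaleKernel<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
    // HIP form emitted by hipify:
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, 2.0f, n);

    hipError_t err = hipGetLastError();                 // catch launch errors
    if (err != hipSuccess)
        fprintf(stderr, "launch failed: %s\n", hipGetErrorString(err));
    hipStreamSynchronize(stream);                       // wait on the stream, as matchSubset() does

    hipStreamDestroy(stream);
    hipFree(d_data);
    return 0;
}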
a412d1d406762b038158268bad80d36c13b0f715.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size); __device__ int add(int a, int b) { return a + b; } __global__ void addKernel(int* c, const int* a, const int* b) { int x = threadIdx.x; int y = threadIdx.y; int i = y * 3 + x; c[i] = add(a[i], b[i]); } __global__ void helloFromGPU() { printf("hello from GPU\n"); } int cudamain() { hipDeviceProp_t cdp0, cdp1; hipGetDeviceProperties(&cdp0, 0); std::cout << cdp0.name << std::endl; const int arraySize = 6; const int a[arraySize] = {1, 2, 3, 4, 5, 6}; const int b[arraySize] = {10, 20, 30, 40, 50, 11}; int c[arraySize] = {0}; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); // helloFromGPU <<<1, 10 >>> (); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4], c[5]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = nullptr; int* dev_b = nullptr; int* dev_c = nullptr; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } dim3 block(size / 2, 2); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(block), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } // hipDeviceReset(); Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
a412d1d406762b038158268bad80d36c13b0f715.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size); __device__ int add(int a, int b) { return a + b; } __global__ void addKernel(int* c, const int* a, const int* b) { int x = threadIdx.x; int y = threadIdx.y; int i = y * 3 + x; c[i] = add(a[i], b[i]); } __global__ void helloFromGPU() { printf("hello from GPU\n"); } int cudamain() { cudaDeviceProp cdp0, cdp1; cudaGetDeviceProperties(&cdp0, 0); std::cout << cdp0.name << std::endl; const int arraySize = 6; const int a[arraySize] = {1, 2, 3, 4, 5, 6}; const int b[arraySize] = {10, 20, 30, 40, 50, 11}; int c[arraySize] = {0}; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); // helloFromGPU <<<1, 10 >>> (); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4], c[5]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int* c, const int* a, const int* b, unsigned int size) { int* dev_a = nullptr; int* dev_b = nullptr; int* dev_c = nullptr; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } dim3 block(size / 2, 2); // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, block>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. 
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } // cudaDeviceReset(); Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
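// The vector-add pair above checks every runtime call by hand and unwinds through
// a goto Error label; the mummergpu sources wrap the same idea in a (commented-out)
// CUDA_SAFE_CALL macro. A minimal sketch of that pattern on the HIP side, assuming
// a hypothetical HIP_CHECK name that appears nowhere in these files:
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(call)                                                       \
    do {                                                                      \
        hipError_t err_ = (call);                                             \
        if (err_ != hipSuccess) {                                             \
            fprintf(stderr, "%s failed at %s:%d: %s\n",                       \
                    #call, __FILE__, __LINE__, hipGetErrorString(err_));      \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int main() {
    int* dev = nullptr;
    HIP_CHECK(hipMalloc(&dev, 6 * sizeof(int)));   // allocation checked in one line
    HIP_CHECK(hipMemset(dev, 0, 6 * sizeof(int))); // as is every later runtime call
    HIP_CHECK(hipFree(dev));
    HIP_CHECK(hipDeviceReset());                   // mirrors the reset in cudamain()
    return 0;
}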
094c36b750c4312f4602bd755dc5f6fecf04a281.hip
// !!! This is a file automatically generated by hipify!!! #include "ATen/Context.h" #include "ATen/hip/HIPContext.h" #include "ATen/Dispatch.h" #include "ATen/NativeFunctions.h" #include "ATen/hip/PinnedMemoryAllocator.h" #include "ATen/hip/HIPApplyUtils.cuh" #include "ATen/native/LinearAlgebraUtils.h" #include "ATen/native/hip/MiscUtils.h" #include "THH.h" // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaGesvBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { AT_ERROR("gesv only takes float or double Tensors"); } template<class scalar_t> void magmaGetrfBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("getrf only takes float or double Tensors"); } template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("getri only takes float or double Tensors"); } template<class scalar_t> void magmaPotrsBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("potrs only takes float or double Tensors"); } template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("cholesky only takes float or double Tensors"); } template<> void magmaGesvBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaGesvBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaGetrfBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetrfBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, 
magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaPotrsBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaPotrsBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } #endif #define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \ auto storage_##name = pin_memory<type>(size, dummy_tensor); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("gesv: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); magmaGesvBatched<scalar_t>( n, nrhs, A_array, n, ipiv_array, b_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{ apply_gesv<scalar_t>(self_working_copy, A_working_copy, infos); }); batchCheckErrors(infos, "gesv"); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(self.get_device()); magmaGetrfBatched<scalar_t>( n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue); magmaGetriBatched<scalar_t>( n, self_array, n, ipiv_array, self_inv_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } // Because this is out-of-place inverse, the predefined macros will // not work Tensor _inverse_helper_cuda(const Tensor& self) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); auto self_inv_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{ apply_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse"); return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ potrs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_potrs(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("potrs: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? 
MagmaUpper : MagmaLower; auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t info_tmp; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); magmaPotrsBatched<scalar_t>( uplo, n, nrhs, A_array, n, b_array, n, info_tmp, batch_size, magma_queue); info = info_tmp; #endif } Tensor _potrs_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.type(), "potrs", [&]{ apply_potrs<scalar_t>(self_working_copy, A_working_copy, upper, info); }); AT_CHECK(info == 0, "MAGMA potrs : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data<scalar_t>(); auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); magmaCholeskyBatched<scalar_t>( uplo, n, self_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); batchCheckErrors(infos, "cholesky"); if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } }} // namespace at::native #undef ALLOCATE_ARRAY
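/*
 * A minimal sketch of the per-batch pointer setup that apply_gesv, apply_inverse and
 * apply_potrs above all share. The batched MAGMA routines take arrays of device
 * pointers, one entry per matrix in the batch, so each helper derives them from a base
 * data pointer plus the matrix stride. build_batch_pointers and its parameter names are
 * illustrative stand-ins and are not functions from this file; in the real code the
 * output arrays come from the pinned-memory ALLOCATE_ARRAY macro.
 */
#include <cstdint>

template <typename scalar_t>
static void build_batch_pointers(scalar_t* A_data, int64_t A_mat_stride,
                                 scalar_t* b_data, int64_t b_mat_stride,
                                 int* ipiv_data, int64_t n, int64_t batch_size,
                                 scalar_t** A_array, scalar_t** b_array,
                                 int** ipiv_array) {
  for (int64_t i = 0; i < batch_size; i++) {
    A_array[i]    = &A_data[i * A_mat_stride];   // i-th coefficient matrix
    b_array[i]    = &b_data[i * b_mat_stride];   // i-th right-hand-side block
    ipiv_array[i] = &ipiv_data[i * n];           // i-th pivot vector (n entries)
  }
}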
094c36b750c4312f4602bd755dc5f6fecf04a281.cu
#include "ATen/Context.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/Dispatch.h" #include "ATen/NativeFunctions.h" #include "ATen/cuda/PinnedMemoryAllocator.h" #include "ATen/cuda/CUDAApplyUtils.cuh" #include "ATen/native/LinearAlgebraUtils.h" #include "ATen/native/cuda/MiscUtils.h" #include "THC.h" // for USE_MAGMA #ifdef USE_MAGMA #include <magma.h> #include <magma_types.h> #endif namespace at { namespace native { #ifdef USE_MAGMA template<class scalar_t> void magmaGesvBatched( magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { AT_ERROR("gesv only takes float or double Tensors"); } template<class scalar_t> void magmaGetrfBatched( magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("getrf only takes float or double Tensors"); } template<class scalar_t> void magmaGetriBatched( magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("getri only takes float or double Tensors"); } template<class scalar_t> void magmaPotrsBatched( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("potrs only takes float or double Tensors"); } template<class scalar_t> void magmaCholeskyBatched( magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { AT_ERROR("cholesky only takes float or double Tensors"); } template<> void magmaGesvBatched<double>( magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaGesvBatched<float>( magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb, magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) { magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue()); } template<> void magmaGetrfBatched<double>( magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetrfBatched<float>( magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<double>( magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t 
batchsize, const MAGMAQueue& magma_queue) { magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaGetriBatched<float>( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaPotrsBatched<double>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaPotrsBatched<float>( magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) { info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<double>( magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } template<> void magmaCholeskyBatched<float>( magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) { magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue()); } #endif #define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \ auto storage_##name = pin_memory<type>(size, dummy_tensor); \ name = static_cast<type*>(storage_##name.data()); // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ gesv ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_gesv(Tensor& b, Tensor& A, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("gesv: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); magmaGesvBatched<scalar_t>( n, nrhs, A_array, n, ipiv_array, b_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } std::tuple<Tensor, Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{ apply_gesv<scalar_t>(self_working_copy, A_working_copy, infos); }); batchCheckErrors(infos, "gesv"); return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy); } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_inverse(Tensor &self, Tensor &self_inv, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("inverse: MAGMA library not found in " "compilation. 
Please rebuild with MAGMA."); #else auto self_data = self.data<scalar_t>(); auto self_mat_stride = matrixStride(self); auto self_inv_data = self_inv.data<scalar_t>(); auto self_inv_mat_stride = matrixStride(self_inv); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** self_array; scalar_t** self_inv_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self); ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, self); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, self); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self); ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size, self_inv); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(self.get_device()); magmaGetrfBatched<scalar_t>( n, n, self_array, n, ipiv_array, info_array, batch_size, magma_queue); magmaGetriBatched<scalar_t>( n, self_array, n, ipiv_array, self_inv_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } // Because this is out-of-place inverse, the predefined macros will // not work Tensor _inverse_helper_cuda(const Tensor& self) { std::vector<int64_t> infos(batchCount(self), 0); auto self_working_copy = cloneBatchedColumnMajor(self); auto self_inv_working_copy = cloneBatchedColumnMajor(self); AT_DISPATCH_FLOATING_TYPES(self.type(), "inverse", [&]{ apply_inverse<scalar_t>( self_working_copy, self_inv_working_copy, infos); }); batchCheckErrors(infos, "inverse"); return self_inv_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ potrs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_potrs(Tensor& b, Tensor& A, bool upper, int64_t& info) { #ifndef USE_MAGMA AT_ERROR("potrs: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? 
MagmaUpper : MagmaLower; auto A_data = A.data<scalar_t>(); auto b_data = b.data<scalar_t>(); auto A_mat_stride = matrixStride(A); auto b_mat_stride = matrixStride(b); magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount"); magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)"); magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)"); magma_int_t info_tmp; magma_int_t* ipiv_data; magma_int_t** ipiv_array; scalar_t** A_array; scalar_t** b_array; ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b); ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b); ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b); ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { A_array[i] = &A_data[i * A_mat_stride]; b_array[i] = &b_data[i * b_mat_stride]; ipiv_array[i] = &ipiv_data[i * n]; } MAGMAQueue magma_queue(b.get_device()); magmaPotrsBatched<scalar_t>( uplo, n, nrhs, A_array, n, b_array, n, info_tmp, batch_size, magma_queue); info = info_tmp; #endif } Tensor _potrs_helper_cuda(const Tensor& self, const Tensor& A, bool upper) { int64_t info = 0; auto self_working_copy = cloneBatchedColumnMajor(self); auto A_working_copy = cloneBatchedColumnMajor(A); AT_DISPATCH_FLOATING_TYPES(self.type(), "potrs", [&]{ apply_potrs<scalar_t>(self_working_copy, A_working_copy, upper, info); }); AT_CHECK(info == 0, "MAGMA potrs : invalid argument: ", -info); return self_working_copy; } // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ template <typename scalar_t> static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) { #ifndef USE_MAGMA AT_ERROR("cholesky: MAGMA library not found in " "compilation. Please rebuild with MAGMA."); #else magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower; auto self_data = self.data<scalar_t>(); auto self_mat_stride = matrixStride(self); magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount"); magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)"); magma_int_t* info_array; scalar_t** self_array; ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, self); ALLOCATE_ARRAY(self_array, scalar_t*, batch_size, self); // Set up the created arrays for (int64_t i = 0; i < batch_size; i++) { self_array[i] = &self_data[i * self_mat_stride]; } MAGMAQueue magma_queue(self.get_device()); magmaCholeskyBatched<scalar_t>( uplo, n, self_array, n, info_array, batch_size, magma_queue); for (int64_t i = 0; i < batch_size; i++) { infos[i] = info_array[i]; } #endif } Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) { std::vector<int64_t> infos(batchCount(self), 0); Tensor self_working_copy; if (upper) { self_working_copy = cloneBatchedColumnMajor(self.transpose(-1, -2)); } else { self_working_copy = cloneBatchedColumnMajor(self); } AT_DISPATCH_FLOATING_TYPES(self.type(), "cholesky", [&]{ apply_cholesky<scalar_t>(self_working_copy, false, infos); }); batchCheckErrors(infos, "cholesky"); if (upper) { return self_working_copy.transpose(-1, -2); } else { return self_working_copy; } } }} // namespace at::native #undef ALLOCATE_ARRAY
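/*
 * A hedged sketch of how the per-batch `infos` vector filled in by apply_gesv,
 * apply_inverse and apply_cholesky is typically interpreted. In the real code this is
 * handled by batchCheckErrors from LinearAlgebraUtils.h; check_batch_infos below is a
 * hypothetical stand-in that follows the usual LAPACK convention (info < 0: invalid
 * argument, info > 0: factorization failed, e.g. a singular or non-positive-definite
 * input for that batch element).
 */
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

static void check_batch_infos(const std::vector<int64_t>& infos, const char* name) {
  for (size_t i = 0; i < infos.size(); ++i) {
    if (infos[i] < 0)
      throw std::runtime_error(std::string(name) + ": argument " +
                               std::to_string(-infos[i]) + " invalid in batch element " +
                               std::to_string(i));
    if (infos[i] > 0)
      throw std::runtime_error(std::string(name) + ": factorization failed for batch element " +
                               std::to_string(i) + " (info = " + std::to_string(infos[i]) + ")");
  }
}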
6a783c61ac454a5da52b1cb9ae93714959914155.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "../KeyFinder/KeySearchTypes.h" #include "CudaKeySearchDevice.h" #include "../CudaMath/ptx.cuh" #include "../CudaMath/secp256k1.cuh" #include "../CudaMath/sha256.cuh" #include "../CudaMath/ripemd160.cuh" #include "../Secp256k1/secp256k1.h" #include "CudaHashLookup.cuh" #include "CudaAtomicList.cuh" #include "CudaDeviceKeys.cuh" __constant__ unsigned int _INC_X[8]; __constant__ unsigned int _INC_Y[8]; __constant__ unsigned int* _CHAIN[1]; static unsigned int* _chainBufferPtr = NULL; __device__ void doRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5]) { const unsigned int iv[5] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 }; for (int i = 0; i < 5; i++) { hOut[i] = endian(hIn[i] + iv[(i + 1) % 5]); } } /** * Allocates device memory for storing the multiplication chain used in the batch inversion operation */ hipError_t allocateChainBuf(unsigned int count) { hipError_t err = hipMalloc(&_chainBufferPtr, count * sizeof(unsigned int) * 8); if (err) { return err; } err = hipMemcpyToSymbol(_CHAIN, &_chainBufferPtr, sizeof(unsigned int*)); if (err) { hipFree(_chainBufferPtr); } return err; } void cleanupChainBuf() { if (_chainBufferPtr != NULL) { hipFree(_chainBufferPtr); _chainBufferPtr = NULL; } } /** *Sets the EC point which all points will be incremented by */ hipError_t setIncrementorPoint(const secp256k1::uint256& x, const secp256k1::uint256& y) { unsigned int xWords[8]; unsigned int yWords[8]; x.exportWords(xWords, 8, secp256k1::uint256::BigEndian); y.exportWords(yWords, 8, secp256k1::uint256::BigEndian); hipError_t err = hipMemcpyToSymbol(_INC_X, xWords, sizeof(unsigned int) * 8); if (err) { return err; } return hipMemcpyToSymbol(_INC_Y, yWords, sizeof(unsigned int) * 8); } __device__ void hashPublicKey(const unsigned int* x, const unsigned int* y, unsigned int* digestOut) { unsigned int hash[8]; sha256PublicKey(x, y, hash); // Swap to little-endian for (int i = 0; i < 8; i++) { hash[i] = endian(hash[i]); } ripemd160sha256NoFinal(hash, digestOut); } __device__ void hashPublicKeyCompressed(const unsigned int* x, unsigned int yParity, unsigned int* digestOut) { unsigned int hash[8]; sha256PublicKeyCompressed(x, yParity, hash); // Swap to little-endian for (int i = 0; i < 8; i++) { hash[i] = endian(hash[i]); } ripemd160sha256NoFinal(hash, digestOut); } __device__ void setResultFound(int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5]) { CudaDeviceResult r; r.block = blockIdx.x; r.thread = threadIdx.x; r.idx = idx; r.compressed = compressed; for (int i = 0; i < 8; i++) { r.x[i] = x[i]; r.y[i] = y[i]; } doRMD160FinalRound(digest, r.digest); atomicListAdd(&r, sizeof(r)); } __device__ void doIteration(int pointsPerThread, int compression) { unsigned int* chain = _CHAIN[0]; unsigned int* xPtr = ec::getXPtr(); unsigned int* yPtr = ec::getYPtr(); // Multiply together all (_Gx - x) and then invert unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 }; for (int i = 0; i < pointsPerThread; i++) { unsigned int x[8]; unsigned int digest[5]; readInt(xPtr, i, x); if (compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) { unsigned int y[8]; readInt(yPtr, i, y); hashPublicKey(x, y, digest); if (checkHash(digest)) { setResultFound(i, false, x, y, digest); } } if (compression == PointCompressionType::COMPRESSED || compression == 
PointCompressionType::BOTH) { hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest); if (checkHash(digest)) { unsigned int y[8]; readInt(yPtr, i, y); setResultFound(i, true, x, y, digest); } } beginBatchAdd(_INC_X, x, chain, i, i, inverse); } doBatchInverse(inverse); for (int i = pointsPerThread - 1; i >= 0; i--) { unsigned int newX[8]; unsigned int newY[8]; completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, i, chain, inverse, newX, newY); writeInt(xPtr, i, newX); writeInt(yPtr, i, newY); } } __device__ void doIterationWithDouble(int pointsPerThread, int compression) { unsigned int* chain = _CHAIN[0]; unsigned int* xPtr = ec::getXPtr(); unsigned int* yPtr = ec::getYPtr(); // Multiply together all (_Gx - x) and then invert unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 }; for (int i = 0; i < pointsPerThread; i++) { unsigned int x[8]; unsigned int digest[5]; readInt(xPtr, i, x); // uncompressed if (compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) { unsigned int y[8]; readInt(yPtr, i, y); hashPublicKey(x, y, digest); if (checkHash(digest)) { setResultFound(i, false, x, y, digest); } } // compressed if (compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) { hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest); if (checkHash(digest)) { unsigned int y[8]; readInt(yPtr, i, y); setResultFound(i, true, x, y, digest); } } beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, i, inverse); } doBatchInverse(inverse); for (int i = pointsPerThread - 1; i >= 0; i--) { unsigned int newX[8]; unsigned int newY[8]; completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, i, chain, inverse, newX, newY); writeInt(xPtr, i, newX); writeInt(yPtr, i, newY); } } /** * Performs a single iteration */ __global__ void keyFinderKernel(int points, int compression) { doIteration(points, compression); } __global__ void keyFinderKernelWithDouble(int points, int compression) { doIterationWithDouble(points, compression); }
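/*
 * Illustrative sketch of the batched modular inversion ("Montgomery's trick") that
 * doIteration relies on through beginBatchAdd / doBatchInverse / completeBatchAdd:
 * accumulate running products on a forward pass, invert once, then peel off each
 * individual inverse on a backward pass. The real kernels work over the secp256k1
 * field with the 256-bit routines from secp256k1.cuh; the small 64-bit prime below is
 * only there to keep the sketch self-contained, and none of these names are from this file.
 */
#include <cstddef>
#include <cstdint>

static const uint64_t P_DEMO = 1000000007ULL;   // demo modulus, assumed prime

static uint64_t modmul(uint64_t a, uint64_t b) { return (unsigned __int128)a * b % P_DEMO; }

static uint64_t modpow(uint64_t a, uint64_t e) {
  uint64_t r = 1;
  while (e) { if (e & 1) r = modmul(r, a); a = modmul(a, a); e >>= 1; }
  return r;
}

static uint64_t modinv(uint64_t a) { return modpow(a, P_DEMO - 2); }  // Fermat's little theorem

// One inversion amortized over n elements; chain[] plays the role of the _CHAIN buffer
// allocated by allocateChainBuf.
static void batch_invert(uint64_t* x, uint64_t* chain, size_t n) {
  uint64_t acc = 1;
  for (size_t i = 0; i < n; ++i) { chain[i] = acc; acc = modmul(acc, x[i]); }  // forward products
  uint64_t inv = modinv(acc);                                                  // single inversion
  for (size_t i = n; i-- > 0; ) {
    uint64_t xi_inv = modmul(inv, chain[i]);  // equals 1 / x[i]
    inv = modmul(inv, x[i]);                  // drop x[i] from the running inverse
    x[i] = xi_inv;
  }
}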
6a783c61ac454a5da52b1cb9ae93714959914155.cu
#include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "../KeyFinder/KeySearchTypes.h" #include "CudaKeySearchDevice.h" #include "../CudaMath/ptx.cuh" #include "../CudaMath/secp256k1.cuh" #include "../CudaMath/sha256.cuh" #include "../CudaMath/ripemd160.cuh" #include "../Secp256k1/secp256k1.h" #include "CudaHashLookup.cuh" #include "CudaAtomicList.cuh" #include "CudaDeviceKeys.cuh" __constant__ unsigned int _INC_X[8]; __constant__ unsigned int _INC_Y[8]; __constant__ unsigned int* _CHAIN[1]; static unsigned int* _chainBufferPtr = NULL; __device__ void doRMD160FinalRound(const unsigned int hIn[5], unsigned int hOut[5]) { const unsigned int iv[5] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 }; for (int i = 0; i < 5; i++) { hOut[i] = endian(hIn[i] + iv[(i + 1) % 5]); } } /** * Allocates device memory for storing the multiplication chain used in the batch inversion operation */ cudaError_t allocateChainBuf(unsigned int count) { cudaError_t err = cudaMalloc(&_chainBufferPtr, count * sizeof(unsigned int) * 8); if (err) { return err; } err = cudaMemcpyToSymbol(_CHAIN, &_chainBufferPtr, sizeof(unsigned int*)); if (err) { cudaFree(_chainBufferPtr); } return err; } void cleanupChainBuf() { if (_chainBufferPtr != NULL) { cudaFree(_chainBufferPtr); _chainBufferPtr = NULL; } } /** *Sets the EC point which all points will be incremented by */ cudaError_t setIncrementorPoint(const secp256k1::uint256& x, const secp256k1::uint256& y) { unsigned int xWords[8]; unsigned int yWords[8]; x.exportWords(xWords, 8, secp256k1::uint256::BigEndian); y.exportWords(yWords, 8, secp256k1::uint256::BigEndian); cudaError_t err = cudaMemcpyToSymbol(_INC_X, xWords, sizeof(unsigned int) * 8); if (err) { return err; } return cudaMemcpyToSymbol(_INC_Y, yWords, sizeof(unsigned int) * 8); } __device__ void hashPublicKey(const unsigned int* x, const unsigned int* y, unsigned int* digestOut) { unsigned int hash[8]; sha256PublicKey(x, y, hash); // Swap to little-endian for (int i = 0; i < 8; i++) { hash[i] = endian(hash[i]); } ripemd160sha256NoFinal(hash, digestOut); } __device__ void hashPublicKeyCompressed(const unsigned int* x, unsigned int yParity, unsigned int* digestOut) { unsigned int hash[8]; sha256PublicKeyCompressed(x, yParity, hash); // Swap to little-endian for (int i = 0; i < 8; i++) { hash[i] = endian(hash[i]); } ripemd160sha256NoFinal(hash, digestOut); } __device__ void setResultFound(int idx, bool compressed, unsigned int x[8], unsigned int y[8], unsigned int digest[5]) { CudaDeviceResult r; r.block = blockIdx.x; r.thread = threadIdx.x; r.idx = idx; r.compressed = compressed; for (int i = 0; i < 8; i++) { r.x[i] = x[i]; r.y[i] = y[i]; } doRMD160FinalRound(digest, r.digest); atomicListAdd(&r, sizeof(r)); } __device__ void doIteration(int pointsPerThread, int compression) { unsigned int* chain = _CHAIN[0]; unsigned int* xPtr = ec::getXPtr(); unsigned int* yPtr = ec::getYPtr(); // Multiply together all (_Gx - x) and then invert unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 }; for (int i = 0; i < pointsPerThread; i++) { unsigned int x[8]; unsigned int digest[5]; readInt(xPtr, i, x); if (compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) { unsigned int y[8]; readInt(yPtr, i, y); hashPublicKey(x, y, digest); if (checkHash(digest)) { setResultFound(i, false, x, y, digest); } } if (compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) { hashPublicKeyCompressed(x, readIntLSW(yPtr, i), 
digest); if (checkHash(digest)) { unsigned int y[8]; readInt(yPtr, i, y); setResultFound(i, true, x, y, digest); } } beginBatchAdd(_INC_X, x, chain, i, i, inverse); } doBatchInverse(inverse); for (int i = pointsPerThread - 1; i >= 0; i--) { unsigned int newX[8]; unsigned int newY[8]; completeBatchAdd(_INC_X, _INC_Y, xPtr, yPtr, i, i, chain, inverse, newX, newY); writeInt(xPtr, i, newX); writeInt(yPtr, i, newY); } } __device__ void doIterationWithDouble(int pointsPerThread, int compression) { unsigned int* chain = _CHAIN[0]; unsigned int* xPtr = ec::getXPtr(); unsigned int* yPtr = ec::getYPtr(); // Multiply together all (_Gx - x) and then invert unsigned int inverse[8] = { 0,0,0,0,0,0,0,1 }; for (int i = 0; i < pointsPerThread; i++) { unsigned int x[8]; unsigned int digest[5]; readInt(xPtr, i, x); // uncompressed if (compression == PointCompressionType::UNCOMPRESSED || compression == PointCompressionType::BOTH) { unsigned int y[8]; readInt(yPtr, i, y); hashPublicKey(x, y, digest); if (checkHash(digest)) { setResultFound(i, false, x, y, digest); } } // compressed if (compression == PointCompressionType::COMPRESSED || compression == PointCompressionType::BOTH) { hashPublicKeyCompressed(x, readIntLSW(yPtr, i), digest); if (checkHash(digest)) { unsigned int y[8]; readInt(yPtr, i, y); setResultFound(i, true, x, y, digest); } } beginBatchAddWithDouble(_INC_X, _INC_Y, xPtr, chain, i, i, inverse); } doBatchInverse(inverse); for (int i = pointsPerThread - 1; i >= 0; i--) { unsigned int newX[8]; unsigned int newY[8]; completeBatchAddWithDouble(_INC_X, _INC_Y, xPtr, yPtr, i, i, chain, inverse, newX, newY); writeInt(xPtr, i, newX); writeInt(yPtr, i, newY); } } /** * Performs a single iteration */ __global__ void keyFinderKernel(int points, int compression) { doIteration(points, compression); } __global__ void keyFinderKernelWithDouble(int points, int compression) { doIterationWithDouble(points, compression); }
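/*
 * A hedged host-side usage sketch: one plausible order for configuring the __constant__
 * state exported above before launching keyFinderKernel. Only allocateChainBuf,
 * setIncrementorPoint, cleanupChainBuf and keyFinderKernel come from this file; the
 * launch geometry and the assumption that the chain buffer needs one entry per point
 * handled by the grid (blocks * threads * pointsPerThread) are illustrative guesses.
 */
static cudaError_t setupAndLaunch(const secp256k1::uint256& incX,
                                  const secp256k1::uint256& incY,
                                  int blocks, int threads,
                                  int pointsPerThread, int compression) {
  // Chain buffer backing the batch-inversion pass (the _CHAIN symbol).
  cudaError_t err = allocateChainBuf((unsigned int)(blocks * threads * pointsPerThread));
  if (err) return err;

  // Copy the increment point into constant memory (_INC_X / _INC_Y).
  err = setIncrementorPoint(incX, incY);
  if (err) { cleanupChainBuf(); return err; }

  keyFinderKernel<<<blocks, threads>>>(pointsPerThread, compression);
  return cudaGetLastError();
}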
803c70570b438a4bde577f95dcb66c2944068692.hip
// !!! This is a file automatically generated by hipify!!! char program[] = "CUDAPm1 v0.20"; /* CUDALucas.c Shoichiro Yamada Oct. 2010 This is an adaptation of Richard Crandall lucdwt.c, John Sweeney MacLucasUNIX.c, and Guillermo Ballester Valor MacLucasFFTW.c code. Improvement From Prime95. It also contains mfaktc code by Oliver Weihe and Eric Christenson adapted for CUDALucas use. Such code is under the GPL, and is noted as such. */ /* Include Files */ #include <stdlib.h> #include <stdio.h> #include <gmp.h> #include <math.h> #include <assert.h> #include <time.h> #include <signal.h> #include <sys/types.h> #include <sys/stat.h> #ifdef _MSC_VER //#define stat _stat #define strncasecmp strnicmp // _strnicmp #include <direct.h> #endif #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include "cuda_safecalls.h" #include "parse.h" /* In order to have the gettimeofday() function, you need these includes on Linux: #include <sys/time.h> #include <unistd.h> On Windows, you need #include <winsock2.h> and a definition for int gettimeofday (struct timeval *tv, struct timezone *) {} Both platforms are taken care of in parse.h and parse.c. */ /************************ definitions ************************************/ /* http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html base code from Takuya OOURA. */ /* global variables needed */ double *g_ttmp, *g_ttp1; double *g_x, *g_ct; double *e_data; double *rp_data; int *g_xint; char *size; int threads1, threads2 = 128, threads3= 128; float *g_err, g_max_err = 0.0f; int *g_datai, *g_carryi; long long int *g_datal, *g_carryl; hipfftHandle plan; hipDeviceProp_t dev; int fft_count; int multipliers[250]; int quitting, checkpoint_iter, fftlen, tfdepth=74, llsaved=2, s_f, t_f, r_f, d_f, k_f; int unused_mem = 100; int polite, polite_f; int b1 = 0, g_b1_commandline = 0; int g_b2 = 0, g_b2_commandline = 0; int g_d = 0, g_d_commandline = 0; int g_e = 0; int g_nrp = 0; int g_eb1 = 0; int keep_s1 = 0; char folder[132]; char input_filename[132], RESULTSFILE[132]; char INIFILE[132] = "CUDAPm1.ini"; char AID[132]; // Assignment key char s_residue[32]; __constant__ double g_ttp_inc[2]; __constant__ int g_qn[2]; /************************ kernels ************************************/ # define RINT_x86(x) (floor(x+0.5)) # define RINT(x) __rintd(x) void set_ttp_inc(double *h_ttp_inc){ hipMemcpyToSymbol(g_ttp_inc, h_ttp_inc, 2 * sizeof(double)); } void set_qn(int *h_qn){ hipMemcpyToSymbol(g_qn, h_qn, 2 * sizeof(int)); } __global__ void square (int n, double *a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; //double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; xi = 2.0 * ajr * aji; xr = (ajr - aji) * (ajr + aji); yi = 2.0 * akr * aki; yr = (akr - aki) * (akr + aki); ajr = xr - yr; aji = xi + yi; akr = wkr * ajr + wki * aji; aki = wkr * aji - wki * ajr; a[j] = xr - akr; a[1 + j] = aki - xi; a[nminusj] = yr + akr; a[1 + nminusj] = aki - yi; } else { xr = a[0]; xi = a[1]; a[0] = xr * xr + xi * xi; a[1] = -xr * xi - xi * xr; xr = a[0 + m]; xi = a[1 + m]; a[1 + m] = -xr * xi - xi * xr; a[0 + m] = xr * xr - xi * xi; } } __global__ void square1 (int n, double *b, double 
*a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; new_aji = 2.0 * ajr * aji; new_ajr = (ajr - aji) * (ajr + aji); new_aki = 2.0 * akr * aki; new_akr = (akr - aki) * (akr + aki); xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; b[j] = new_ajr - yr; b[1 + j] = yi - new_aji; b[nminusj] = new_akr + yr; b[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; b[0] = xr * xr + xi * xi; b[1] = -xr * xi - xi * xr; xr = a[0 + m]; xi = a[1 + m]; b[1 + m] = -xr * xi - xi * xr; b[0 + m] = xr * xr - xi * xi; } } __global__ void mult2 (double *g_out, double *a, double *b, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; xr = b[j]; xi = b[1 + j]; yr = b[nminusj]; yi = b[1 + nminusj]; new_aji = ajr * xi + xr * aji; new_ajr = ajr * xr - aji * xi; new_aki = akr * yi + yr * aki; new_akr = akr * yr - aki * yi; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b[0]; yi = b[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b[0 + m]; yi = b[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void mult3 (double *g_out, double *a, double *b, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki, bjr, bji, bkr, bki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; bjr = b[j]; bji = b[1 + j]; bkr = b[nminusj]; bki = b[1 + nminusj]; xr = bjr - bkr; xi = bji + bki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; bjr -= yr; bji -= yi; bkr += yr; bki -= yi; new_aji = ajr * bji + bjr * aji; new_ajr = ajr * bjr - aji * bji; new_aki = akr * bki + bkr * aki; new_akr = akr * bkr - aki * bki; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b[0]; yi = b[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b[0 + m]; yi = b[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void sub_mul (double *g_out, double *a, double *b1, double 
*b2, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki, bjr, bji, bkr, bki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; bjr = b1[j] - b2[j]; bji = b1[1 + j] - b2[1 + j]; bkr = b1[nminusj] - b2[nminusj]; bki = b1[1 + nminusj] - b2[1 + nminusj]; new_aji = ajr * bji + bjr * aji; new_ajr = ajr * bjr - aji * bji; new_aki = akr * bki + bkr * aki; new_akr = akr * bkr - aki * bki; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b1[0] - b2[0]; yi = b1[1] - b2[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b1[0 + m] - b2[0 + m]; yi = b1[1 + m] - b2[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void pre_mul (int n, double *a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; a[j] = ajr; a[1 + j] = aji; a[nminusj] = akr; a[1 + nminusj] = aki; } } __device__ static double __rintd (double z) { double y; asm ("cvt.rni.f64.f64 %0, %1;": "=d" (y):"d" (z)); return (y); } __global__ void apply_weights (double *g_out, int *g_in, double *g_ttmp) { int val[2], test = 1; double ttp_temp[2]; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; val[0] = g_in[index]; val[1] = g_in[index + 1]; ttp_temp[0] = g_ttmp[index]; ttp_temp[1] = g_ttmp[index + 1]; if(ttp_temp[0] < 0.0) test = 0; if(ttp_temp[1] < 0.0) ttp_temp[1] = -ttp_temp[1]; g_out[index + 1] = (double) val[1] * ttp_temp[1]; ttp_temp[1] *= -g_ttp_inc[test]; g_out[index] = (double) val[0] * ttp_temp[1]; } __global__ void norm1a (double *g_in, int *g_data, int *g_xint, double *g_ttmp, int *g_carry, volatile float *g_err, float maxerr, int g_err_flag) { long long int bigint[2]; int val[2], numbits[2] = {g_qn[0],g_qn[0]}, mask[2], shifted_carry; double ttp_temp; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int index1 = blockIdx.x << 1; __shared__ int carry[1024 + 1]; { double tval[2], trint[2]; float ferr[2]; tval[0] = g_ttmp[index]; ttp_temp = g_ttmp[index + 1]; trint[0] = g_in[index]; trint[1] = g_in[index + 1]; if(tval[0] < 0.0) { numbits[0]++; tval[0] = -tval[0]; } if(ttp_temp < 0.0) { numbits[1]++; ttp_temp = -ttp_temp; } tval[1] = tval[0] * g_ttp_inc[numbits[0] == g_qn[0]]; tval[0] = trint[0] * tval[0]; tval[1] = trint[1] * tval[1]; trint[0] = RINT (tval[0]); ferr[0] = tval[0] - trint[0]; ferr[0] = fabs (ferr[0]); bigint[0] = (long long int) trint[0]; trint[1] = RINT (tval[1]); ferr[1] = tval[1] - trint[1]; ferr[1] = fabs (ferr[1]); bigint[1] = (long long int) trint[1]; mask[0] = -1 << numbits[0]; mask[1] = -1 << numbits[1]; if(ferr[0] < 
ferr[1]) ferr[0] = ferr[1]; if (ferr[0] > maxerr) atomicMax((int*) g_err, __float_as_int(ferr[0])); } val[1] = ((int) bigint[1]) & ~mask[1]; carry[threadIdx.x + 1] = (int) (bigint[1] >> numbits[1]); val[0] = ((int) bigint[0]) & ~mask[0]; val[1] += (int) (bigint[0] >> numbits[0]); __syncthreads (); if (threadIdx.x) val[0] += carry[threadIdx.x]; shifted_carry = val[1] - (mask[1] >> 1); val[1] = val[1] - (shifted_carry & mask[1]); carry[threadIdx.x] = shifted_carry >> numbits[1]; shifted_carry = val[0] - (mask[0] >> 1); val[0] = val[0] - (shifted_carry & mask[0]); val[1] += shifted_carry >> numbits[0]; __syncthreads (); if (threadIdx.x == (blockDim.x - 1)) { if (blockIdx.x == gridDim.x - 1) g_carry[0] = carry[threadIdx.x + 1] + carry[threadIdx.x]; else g_carry[blockIdx.x + 1] = carry[threadIdx.x + 1] + carry[threadIdx.x]; } if (threadIdx.x) { val[0] += carry[threadIdx.x - 1]; { g_in[index + 1] = (double) val[1] * ttp_temp; ttp_temp *= -g_ttp_inc[numbits[0] == g_qn[0]]; g_in[index] = (double) val[0] * ttp_temp; } if(g_err_flag) { g_xint[index + 1] = val[1]; g_xint[index] = val[0]; } } else { g_data[index1] = val[0]; g_data[index1 + 1] = val[1]; } } __global__ void norm1b (double *g_in, long long int *g_data, int *g_xint, double *g_ttmp, long long int *g_carry, volatile float *g_err, float maxerr, int g_err_flag) { long long int bigint[2], shifted_carry; int numbits[2] = {g_qn[0],g_qn[0]}, mask[2]; double ttp_temp; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int index1 = blockIdx.x << 1; __shared__ long long int carry[1024 + 1]; { double tval[2], trint[2]; float ferr[2]; tval[0] = g_ttmp[index]; ttp_temp = g_ttmp[index + 1]; trint[0] = g_in[index]; trint[1] = g_in[index + 1]; if(tval[0] < 0.0) { numbits[0]++; tval[0] = -tval[0]; } if(ttp_temp < 0.0) { numbits[1]++; ttp_temp = -ttp_temp; } tval[1] = tval[0] * g_ttp_inc[numbits[0] == g_qn[0]]; tval[0] = trint[0] * tval[0]; tval[1] = trint[1] * tval[1]; trint[0] = RINT (tval[0]); ferr[0] = tval[0] - trint[0]; ferr[0] = fabs (ferr[0]); bigint[0] = (long long int) trint[0]; trint[1] = RINT (tval[1]); ferr[1] = tval[1] - trint[1]; ferr[1] = fabs (ferr[1]); bigint[1] = (long long int) trint[1]; mask[0] = -1 << numbits[0]; mask[1] = -1 << numbits[1]; if(ferr[0] < ferr[1]) ferr[0] = ferr[1]; if (ferr[0] > maxerr) atomicMax((int*) g_err, __float_as_int(ferr[0])); } bigint[0] *= 3; bigint[1] *= 3; carry[threadIdx.x + 1] = (bigint[1] >> numbits[1]); bigint[1] = bigint[1] & ~mask[1]; bigint[1] += bigint[0] >> numbits[0]; bigint[0] = bigint[0] & ~mask[0]; __syncthreads (); if (threadIdx.x) bigint[0] += carry[threadIdx.x]; shifted_carry = bigint[1] - (mask[1] >> 1); bigint[1] = bigint[1] - (shifted_carry & mask[1]); carry[threadIdx.x] = shifted_carry >> numbits[1]; shifted_carry = bigint[0] - (mask[0] >> 1); bigint[0] = bigint[0] - (shifted_carry & mask[0]); bigint[1] += shifted_carry >> numbits[0]; __syncthreads (); if (threadIdx.x == (blockDim.x - 1)) { if (blockIdx.x == gridDim.x - 1) g_carry[0] = carry[threadIdx.x + 1] + carry[threadIdx.x]; else g_carry[blockIdx.x + 1] = carry[threadIdx.x + 1] + carry[threadIdx.x]; } if (threadIdx.x) { bigint[0] += carry[threadIdx.x - 1]; { g_in[index + 1] = (double) bigint[1] * ttp_temp; ttp_temp *= -g_ttp_inc[numbits[0] == g_qn[0]]; g_in[index] = (double) bigint[0] * ttp_temp; } if(g_err_flag) { g_xint[index + 1] = bigint[1]; g_xint[index] = bigint[0]; } } else { g_data[index1] = bigint[0]; g_data[index1 + 1] = bigint[1]; } } __global__ void norm2a (double *g_x, int *g_xint, int g_N, int 
threads1, int *g_data, int *g_carry, double *g_ttp1, int g_err_flag) { const int threadID = blockIdx.x * blockDim.x + threadIdx.x; const int threadID1 = threadID << 1; const int j = (threads1 * threadID) << 1; int temp0, temp1; int mask, shifted_carry, numbits= g_qn[0]; double temp; if (j < g_N) { temp0 = g_data[threadID1] + g_carry[threadID]; temp1 = g_data[threadID1 + 1]; temp = g_ttp1[threadID]; if(temp < 0.0) { numbits++; temp = -temp; } mask = -1 << numbits; shifted_carry = temp0 - (mask >> 1) ; temp0 = temp0 - (shifted_carry & mask); temp1 += (shifted_carry >> numbits); { g_x[j + 1] = temp1 * temp; temp *= -g_ttp_inc[numbits == g_qn[0]]; g_x[j] = temp0 * temp; } if(g_err_flag) { g_xint[j + 1] = temp1; g_xint[j] = temp0; } } } __global__ void norm2b (double *g_x, int *g_xint, int g_N, int threads1, long long int *g_data, long long int *g_carry, double *g_ttp1, int g_err_flag) { const int threadID = blockIdx.x * blockDim.x + threadIdx.x; const int threadID1 = threadID << 1; const int j = (threads1 * threadID) << 1; long long int shifted_carry, temp0, temp1; int mask, numbits = g_qn[0]; double temp; if (j < g_N) { temp0 = g_data[threadID1] + g_carry[threadID]; temp1 = g_data[threadID1 + 1]; temp = g_ttp1[threadID]; if(temp < 0.0) { numbits++; temp = -temp; } mask = -1 << numbits; shifted_carry = temp0 - (mask >> 1) ; temp0 = temp0 - (shifted_carry & mask); temp1 = temp1 + (shifted_carry >> numbits); g_x[j + 1] = temp1 * temp; temp *= -g_ttp_inc[numbits == g_qn[0]]; g_x[j] = temp0 * temp; if(g_err_flag) { g_xint[j + 1] = temp1; g_xint[j] = temp0; } } } __global__ void copy_kernel (double *save, double *y) { const int threadID = (blockIdx.x * blockDim.x + threadIdx.x) << 1; save[threadID] = y[threadID]; save[threadID + 1] = y[threadID + 1]; } /**************************************************************************** * Erato * ***************************************************************************/ //Many thanks to Ben Buhrow. typedef unsigned char u8; typedef unsigned int uint32; typedef unsigned char uint8; typedef unsigned short uint16; typedef long long unsigned int uint64; const int threadsPerBlock = 256; const uint32 block_size = 8192; const int startprime = 8; __constant__ uint32 _step5[5] = {2418280706,604570176,151142544,37785636,1083188233}; __constant__ uint32 _step7[7] = {1107363844,69210240,2151809288,134488080, 276840961,17302560,537952322}; __constant__ uint32 _step11[11] = {33816584,1073774848,135266336,132096,541065345, 528384,2164261380,2113536,67110928,8454146,268443712}; __constant__ uint32 _step13[13] = {1075838992,16809984,262656,536875016,8388672, 67239937,1050624,2147500064,33554688,268959748,4202496, 65664,134218754}; __constant__ uint32 _step17[17] = {268435488,1073741952,512,2049,8196,32784,131136, 524544,2098176,8392704,33570816,134283264,537133056, 2148532224,4194304,16777218,67108872}; __constant__ uint32 _step19[19] = {2147483712,4096,262176,16779264,1073872896,8388608, 536870928,1024,65544,4194816,268468224,2097152,134217732, 256,16386,1048704,67117056,524288,33554433}; __global__ static void SegSieve(uint32 *primes, int maxp, int nump, uint32 N, uint8 *results) { /* expect as input a set of primes to sieve with, how many of those primes there are (maxp) how many primes each thread will be responsible for (nump), and the maximum index that we need to worry about for the requested sieve interval. Also, an array into which we can put this block's count of primes. This routine implements a segmented sieve using a wheel mod 6. 
Each thread block on the gpu sieves a different segment of the number line. Each thread within each block simultaneously sieves a small set of primes, marking composites within shared memory. There is no memory contention between threads because the marking process is write only. Because each thread block starts at a different part of the number line, a small amount of computation must be done for each prime prior to sieving to figure out where to start. After sieving is done, each thread counts primes in part of the shared memory space; the final count is returned in the provided array for each block. The host cpu will do the final sum over blocks. Note, it would not be much more difficult to compute and return the primes in the block instead of just the count, but it would be slower due to the extra memory transfer required. */ uint32 i,j,k; uint32 maxID = (N + 1) / 3; uint32 bid = blockIdx.y * gridDim.x + blockIdx.x; uint32 range = block_size / threadsPerBlock; __shared__ uint8 locsieve[block_size]; __shared__ uint32 bitsieve[block_size / 32]; // everyone init the array. if ((bid+1)*block_size > maxID) { for (j=threadIdx.x * range, k=0; k<range; k++) { // we're counting hits in the kernel as well, so clear the bytes representing primes > N if ((bid * block_size + j + k) < maxID) locsieve[j+k] = 1; else locsieve[j+k] = 0; } } else { for (j=threadIdx.x * range/4, k=0; k<range/4; k++) { ((uint32 *) locsieve)[j+k] = 0x01010101; } } // the smallest primes are dealt with a bit differently. They are sieved in a separate // shared memory space in a packed bit array. constant memory holds pre-computed // information about where each prime lands within a given 32 bit region. each thread // in the block will use this info to simultaneously sieve a small portion of the // packed bit array (that way we make use of the broadcast capabilities of constant memory). // When counting or computing primes, we then have to check both the packed bit array as // well as the regular byte array, but overall it is a win to greatly speed up the // sieving of the smallest primes. 
// compute starting offset for prime 5: i = (bid * 256 + threadIdx.x) % 5; // then sieve prime 5 in the bit array bitsieve[threadIdx.x] = _step5[i]; // compute starting offset for prime 7: i = (bid * 256 + threadIdx.x) % 7; // then sieve prime 7 in the bit array bitsieve[threadIdx.x] |= _step7[i]; // compute starting offset for prime 11: i = (bid * 256 + threadIdx.x) % 11; // then sieve prime 11 in the bit array bitsieve[threadIdx.x] |= _step11[i]; // compute starting offset for prime 13: i = (bid * 256 + threadIdx.x) % 13; // then sieve prime 13 in the bit array bitsieve[threadIdx.x] |= _step13[i]; // compute starting offset for prime 17: i = (bid * 256 + threadIdx.x) % 17; // then sieve prime 17 in the bit array bitsieve[threadIdx.x] |= _step17[i]; // compute starting offset for prime 19: i = (bid * 256 + threadIdx.x) % 19; // then sieve prime 19 in the bit array bitsieve[threadIdx.x] |= _step19[i]; // regroup before sieving __syncthreads(); // now sieve the array for (j=0; j<nump; j++) { int pid = (j * threadsPerBlock) + threadIdx.x + startprime; if (pid < maxp) { uint32 p = primes[pid]; uint32 pstart = p/3; uint32 p2 = 2*p; uint32 block_start = bid * block_size; uint32 start_offset; uint32 s[2]; // the wheel sieve with all multiples of 2 and 3 removed from the array is equivalent to // alternately stepping through the number line by (p+2)*mult, (p-2)*mult, // where mult = (p+1)/6 s[0] = p+(2*((p+1)/6)); s[1] = p-(2*((p+1)/6)); // compute the starting location of this prime in this block if ((bid == 0) || (pstart >= block_start)) { // start one increment past the starting value of p/3, since // we want to count the prime itself as a prime. start_offset = pstart + s[0] - block_start; k = 1; } else { // measure how far the start of this block is from where the prime first landed, // as well as how many complete (+2/-2) steps it would need to take // to cover that distance uint32 dist = (block_start - pstart); uint32 steps = dist / p2; if ((dist % p2) == 0) { // if the number of steps is exact, then we hit the start // of this block exactly, and we start below with the +2 step. start_offset = 0; k = 0; } else { uint32 inc = pstart + steps * p2 + s[0]; if (inc >= block_start) { // if the prime reaches into this block on the first stride, // then start below with the -2 step start_offset = inc - block_start; k = 1; } else { // we need both +2 and -2 strides to get into the block, // so start below with the +2 stride. start_offset = inc + s[1] - block_start; k = 0; } } } // unroll the loop for the smallest primes. if (p < 1024) { uint32 stop = block_size - (2 * p * 4); if (k == 0) { for(i=start_offset ;i < stop; i+=8*p) { locsieve[i] = 0; locsieve[i+s[0]] = 0; locsieve[i+p2] = 0; locsieve[i+p2+s[0]] = 0; locsieve[i+4*p] = 0; locsieve[i+4*p+s[0]] = 0; locsieve[i+6*p] = 0; locsieve[i+6*p+s[0]] = 0; } } else { for(i=start_offset ;i < stop; i+=8*p) { locsieve[i] = 0; locsieve[i+s[1]] = 0; locsieve[i+p2] = 0; locsieve[i+p2+s[1]] = 0; locsieve[i+4*p] = 0; locsieve[i+4*p+s[1]] = 0; locsieve[i+6*p] = 0; locsieve[i+6*p+s[1]] = 0; } } } else i=start_offset; // alternate stepping between the large and small strides this prime takes. 
for( ;i < block_size; k = !k) { locsieve[i] = 0; i += s[k]; } } } // regroup before counting __syncthreads(); for (j=threadIdx.x * range, k=0; k<range; k++) locsieve[j + k] = (locsieve[j+k] & ((bitsieve[(j+k) >> 5] & (1 << ((j+k) & 31))) == 0)); __syncthreads(); if(threadIdx.x == 0) for (k=0; k < block_size; k++) { j = ((bid * block_size + k) * 3 + 1) >> 1; if(j < N >> 1) results[j] = locsieve[k]; } } uint32 tiny_soe(uint32 limit, uint32 *primes) { //simple sieve of erathosthenes for small limits - not efficient //for large limits. uint8 *flags; uint16 prime; uint32 i,j; int it; //allocate flags flags = (uint8 *)malloc(limit/2 * sizeof(uint8)); if (flags == NULL) printf("error allocating flags\n"); memset(flags,1,limit/2); //find the sieving primes, don't bother with offsets, we'll need to find those //separately for each line in the main sieve. primes[0] = 2; it=1; //sieve using primes less than the sqrt of the desired limit //flags are created only for odd numbers (mod2) for (i=1;i<(uint32)(sqrt((double)limit)/2+1);i++) { if (flags[i] > 0) { prime = (uint32)(2*i + 1); for (j=i+prime;j<limit/2;j+=prime) flags[j]=0; primes[it]=prime; it++; } } //now find the rest of the prime flags and compute the sieving primes for (;i<limit/2;i++) { if (flags[i] == 1) { primes[it] = (uint32)(2*i + 1); it++; } } free(flags); return it; } int gtpr(int n, uint8* bprimes) { uint32 Nsmall = (uint32) sqrt((double) n); int numblocks; int primes_per_thread; uint32* primes; uint32* device_primes; uint32 np; uint8* results; // find seed primes primes = (uint32*)malloc(Nsmall*sizeof(uint32)); np = tiny_soe(Nsmall, primes); // put the primes on the device hipMalloc((void**) &device_primes, sizeof(uint32) * np); hipMemcpy(device_primes, primes, sizeof(uint32)*np, hipMemcpyHostToDevice); // compute how many whole blocks we have to sieve and how many primes each // thread will be responsible for. numblocks = (n / 3 / block_size + 1); primes_per_thread = ((np - startprime) + threadsPerBlock - 1) / threadsPerBlock; dim3 grid((uint32)sqrt((double)numblocks)+1,(uint32)sqrt((double)numblocks)+1); hipMalloc((void**) &results, sizeof(uint8) * (n >> 1)); hipMemset(results, 0, sizeof(uint8) * (n >> 1)); hipLaunchKernelGGL(( SegSieve), dim3(grid), dim3(threadsPerBlock), 0, 0, device_primes, np, primes_per_thread, n, results); hipDeviceSynchronize(); hipMemcpy (bprimes, results, sizeof (uint8) * (n >> 1), hipMemcpyDeviceToHost); hipFree(device_primes); hipFree(results); free(primes); return 0; } /************************************************************** * * FFT and other related Functions * **************************************************************/ /* rint is not ANSI compatible, so we need a definition for * WIN32 and other platforms with rint. 
* Also we use that to write the trick to rint() */ /**************************************************************************** * Lucas Test - specific routines * ***************************************************************************/ void reset_err(float* maxerr, float value) { *maxerr *= value; cutilSafeCall (hipMemcpy (g_err, maxerr, sizeof (float), hipMemcpyHostToDevice)); } float lucas_square (/*double *x,*/ int q, int n, int iter, int last, float* maxerr, int error_flag, int bit, int stage, int chkpt) { float terr = 0.0; if (iter < 100 && iter % 10 == 0) { cutilSafeCall (hipMemcpy (&terr, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(terr > *maxerr) *maxerr = terr; } cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( square) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, n, g_x, g_ct); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); if(!bit) { hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, *maxerr, chkpt); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_x, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, chkpt); } else { hipLaunchKernelGGL(( norm1b) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_x, g_datal, g_xint, g_ttmp, g_carryl, g_err, *maxerr, chkpt); hipLaunchKernelGGL(( norm2b) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_x, g_xint, n, threads1, g_datal, g_carryl, g_ttp1, chkpt); } if (error_flag) { cutilSafeCall (hipMemcpy (&terr, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(terr > *maxerr) *maxerr = terr; } else if (polite_f && (iter % polite) == 0) cutilSafeThreadSync(); return (terr); } void init_x_int(int *x_int, unsigned *x_packed, int q, int n, int *stage) { int j; if(*stage == 0) { *stage = 1; for(j = 0; j < n; j++) x_int[j] = 0; x_int[0] = 1; if(x_packed) { for(j = 0; j < (q + 31) /32; j++) x_packed[j] = 0; x_packed[0] = 1; } } hipMemcpy (g_xint, x_int, sizeof (int) * n , hipMemcpyHostToDevice); } void E_init_d(double *g, double value, int n) { double x[1] = {value}; cutilSafeCall (hipMemset (g, 0.0, sizeof (double) * n)); hipMemcpy (g, x, sizeof (double) , hipMemcpyHostToDevice); } void E_pre_mul(double *g_out, double *g_in, int n, int fft_f) { if(fft_f) cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_in, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( pre_mul) , dim3(n / (4 * threads2)), dim3(threads2), 0, 0, n, g_out, g_ct); } void E_mul(double *g_out, double *g_in1, double *g_in2, int n, float err, int fft_f) { if(fft_f) cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_in1, (hipfftDoubleComplex *) g_in1, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( mult3) , dim3(n / (4 * threads2)), dim3(threads2), 0, 0, g_out, g_in1, g_in2, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); } void E_sub_mul(double *g_out, double *g_in1, double *g_in2, double *g_in3, int n, float err, int chkpt) { cufftSafeCall 
(hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_in1, (hipfftDoubleComplex *) g_in1, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( sub_mul) , dim3(n / (4 * threads2)), dim3(threads2), 0, 0, g_out, g_in1, g_in2, g_in3, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, &g_xint[n], g_ttmp, g_carryi, g_err, err, chkpt); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, &g_xint[n], n, threads1, g_datai, g_carryi, g_ttp1, chkpt); } void E_half_mul(double *g_out, double *g_in1, double *g_in2, int n, float err) { hipLaunchKernelGGL(( mult2) , dim3(n / (4 * threads2)), dim3(threads2), 0, 0, g_out, g_in1, g_in2, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); } int E_to_the_p(double *g_out, double *g_in, mpz_t p, int n, int trans, float *err) { // Assume g_in is premultiplied int last, j; int checksync = trans / (2 * 50) * 2 * 50; int checkerror = trans / (200) * 200; int checksave = trans / (2 * checkpoint_iter) * 2 * checkpoint_iter; int sync = 1; last = mpz_sizeinbase (p, 2); if (last == 1) { E_init_d(g_out, 1.0, n); if(mpz_tstbit (p, last - 1)) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( mult2) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, g_out, g_out, g_in, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } return trans; } hipLaunchKernelGGL(( square1) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, n, g_out, g_in, g_ct); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; cutilSafeCall (hipMemcpy (err, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(mpz_tstbit (p, last - 2)) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( mult2) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, g_out, g_out, g_in, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) 
/ threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } for(j = 3; j <= last && !quitting; j++) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( square) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, n, g_out, g_ct); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; if(mpz_tstbit (p, last - j)) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( mult2) , dim3(n / (4 * threads2)), dim3(threads2) , 0, 0, g_out, g_out, g_in, g_ct, n); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_out, (hipfftDoubleComplex *) g_out, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads1)), dim3(threads1) , 0, 0, g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads1) + threads3 - 1) / threads3), dim3(threads3) , 0, 0, g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } if(trans - checkerror > 200) { sync = 0; checkerror += 200; cutilSafeCall (hipMemcpy (err, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(*err > 0.4) quitting = 2; } if(trans - checksave > 2 * checkpoint_iter) { checksave += 2 * checkpoint_iter; reset_err(err, 0.85f); } if(sync && polite_f && trans - checksync > 2 * polite) { checksync += 2 * polite; cutilSafeThreadSync(); } sync = 1; fflush(NULL); } return trans; } /* -------- initializing routines -------- */ void makect (int nc, double *c) { int j; double d = (double) (nc << 1); for (j = 1; j <= nc; j++) c[j] = 0.5 * cospi (j / d); } void alloc_gpu_mem(int n) { cufftSafeCall (hipfftPlan1d (&plan, n / 2, HIPFFT_Z2Z, 1)); cutilSafeCall (hipMalloc ((void **) &g_x, sizeof (double) * n )); cutilSafeCall (hipMalloc ((void **) &g_ct, sizeof (double) * n / 4)); cutilSafeCall (hipMalloc ((void **) &g_xint, sizeof (int) * 2 * n )); cutilSafeCall (hipMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (hipMalloc ((void **) &g_ttmp, sizeof (double) * n)); cutilSafeCall (hipMalloc ((void **) &g_ttp1, sizeof (double) * 2 * n / threads1)); cutilSafeCall (hipMalloc ((void **) &g_datai, sizeof (int) * 2 * n / threads1)); cutilSafeCall (hipMalloc ((void **) &g_datal, sizeof (long long int) * 2 * n / threads1)); cutilSafeCall (hipMemset (g_err, 0, sizeof (float))); cutilSafeCall (hipMalloc ((void **) &g_carryl, sizeof (long long int) * n / threads1)); cutilSafeCall (hipMalloc ((void **) &g_carryi, sizeof (int) * n / threads1)); } void write_gpu_data(int q, int n) { double *s_ttmp, *s_ttp1, *s_ct; int i, j, qn = q / n, b = q % n; int a, c, bj; double *h_ttp_inc; int *h_qn; s_ct = (double *) malloc (sizeof (double) * (n / 4)); s_ttmp = (double *) malloc (sizeof (double) * (n)); s_ttp1 = (double *) malloc (sizeof (double) * 2 * (n / threads1)); size = (char *) malloc (sizeof (char) * n); h_ttp_inc = (double *) malloc (sizeof (double) * 2); h_qn = (int *) malloc (sizeof (int) * 2); c = n - b; bj = 0; for (j = 1; j < n; j++) { bj += b; bj %= n; a = bj - n; if(j % 2 == 0) s_ttmp[j] = 
exp2 (a / (double) n) * 2.0 / n; else s_ttmp[j] = exp2 (-a / (double) n); size[j] = (bj >= c); if(size[j]) s_ttmp[j] = -s_ttmp[j]; } size[0] = 1; s_ttmp[0] = -2.0 / n; size[n-1] = 0; s_ttmp[n-1] = -s_ttmp[n-1]; for (i = 0, j = 0; i < n ; i += 2 * threads1) { s_ttp1[j] = abs(s_ttmp[i + 1]); if(size[i]) s_ttp1[j] = -s_ttp1[j]; j++; } makect (n / 4, s_ct); h_ttp_inc[0] = -exp2((b-n) / (double) n); h_ttp_inc[1] = -exp2(b / (double) n); set_ttp_inc(h_ttp_inc); h_qn[0] = qn; h_qn[1] = n; set_qn(h_qn); cutilSafeCall(hipMemcpy (g_ttmp, s_ttmp, sizeof (double) * n, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy (g_ttp1, s_ttp1, sizeof (double) * 2 * n / threads1, hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy (g_ct, s_ct, sizeof (double) * (n / 4), hipMemcpyHostToDevice)); free ((char *) s_ct); free ((char *) s_ttmp); free ((char *) s_ttp1); free ((char *) h_ttp_inc); free ((char *) h_qn); } void free_host (int *x_int) { free ((char *) size); free ((char *) x_int); } void free_gpu(void) { cufftSafeCall (hipfftDestroy (plan)); cutilSafeCall (hipFree ((char *) g_x)); cutilSafeCall (hipFree ((char *) g_ct)); cutilSafeCall (hipFree ((char *) g_xint)); cutilSafeCall (hipFree ((char *) g_err)); cutilSafeCall (hipFree ((char *) g_ttp1)); cutilSafeCall (hipFree ((char *) g_ttmp)); cutilSafeCall (hipFree ((char *) g_datai)); cutilSafeCall (hipFree ((char *) g_datal)); cutilSafeCall (hipFree ((char *) g_carryl)); cutilSafeCall (hipFree ((char *) g_carryi)); } void close_lucas (int *x_int) { free_host(x_int); free_gpu(); } /************************************************************************** * * * End LL/GPU Functions, Begin Utility/CPU Functions * * * **************************************************************************/ void init_threads(int n) { FILE *threads; char buf[132]; char threadfile[32]; int no_file = 0, no_entry = 1; int th1 = 0, th2 = 0, th3 = 0; int temp_n; sprintf (threadfile, "%s threads.txt", dev.name); threads = fopen(threadfile, "r"); if(threads) { while(fgets(buf, 132, threads) != NULL) { sscanf(buf, "%d %d %d %d", &temp_n, &th1, &th2, &th3); if(n == temp_n * 1024) { threads1 = th1; threads2 = th2; threads3 = th3; no_entry = 0; } } } else no_file = 1; if(no_file || no_entry) { if(no_file) printf("No %s file found. Using default thread sizes.\n", threadfile); else if(no_entry) printf("No entry for fft = %dk found. 
Using default thread sizes.\n", n / 1024); printf("For optimal thread selection, please run\n"); printf("./CUDAPm1 -cufftbench %d %d r\n", n / 1024, n / 1024); printf("for some small r, 0 < r < 6 e.g.\n"); fflush(NULL); } return; } int init_ffts() { //#define COUNT 139 FILE *fft; char buf[132]; int next_fft, j = 0, i = 0; int first_found = 0; #define COUNT 160 int default_mult[COUNT] = { //this batch from GTX570 timings 2, 8, 10, 14, 16, 18, 20, 32, 36, 42, 48, 50, 56, 60, 64, 70, 80, 84, 96, 112, 120, 126, 128, 144, 160, 162, 168, 180, 192, 224, 256, 288, 320, 324, 336, 360, 384, 392, 400, 448, 512, 576, 640, 648, 672, 720, 768, 784, 800, 864, 896, 900, 1024, 1152, 1176, 1280, 1296, 1344, 1440, 1568, 1600, 1728, 1792, 2048, 2160, 2304, 2352, 2592, 2688, 2880, 3024, 3136, 3200, 3584, 3600, 4096, 4320, 4608, 4704, 5120, 5184, 5600, 5760, 6048, 6144, 6272, 6400, 6480, 7168, 7200, 7776, 8064, 8192, 8640, 9216, 9408, 10240, 10368, 10584, 10800, 11200, 11520, 12096, 12288, 12544, 12960, 13824, 14336, 14400, 16384, 17496, 18144, 19208, 19600, 20000, 20250, 21952, 23328, 23814, 24300, 24500, 25088, 25600, 26244, 27000, 27216, 28000, 28672, 31104, 31250, 32000, 32400, 32768, 33614, 34992, 36000, 36288, 38416, 39200, 39366, 40500, 41472, 42336, 43200, 43904, 47628, 49000, 50000, 50176, 51200, 52488, 54432, 55296, 56000, 57344, 60750, 62500, 64000, 64800, 65536 }; char fftfile[32]; sprintf (fftfile, "%s fft.txt", dev.name); fft = fopen(fftfile, "r"); if(!fft) { printf("No %s file found. Using default fft lengths.\n", fftfile); printf("For optimal fft selection, please run\n"); printf("./CUDAPm1 -cufftbench 1 8192 r\n"); printf("for some small r, 0 < r < 6 e.g.\n"); fflush(NULL); for(j = 0; j < COUNT; j++) multipliers[j] = default_mult[j]; } else { while(fgets(buf, 132, fft) != NULL) { int le = 0; sscanf(buf, "%d", &le); if(next_fft = atoi(buf)) { if(!first_found) { while(i < COUNT && default_mult[i] < next_fft) { multipliers[j] = default_mult[i]; i++; j++; } multipliers[j] = next_fft; j++; first_found = 1; } else { multipliers[j] = next_fft; j++; } } } while(default_mult[i] < multipliers[j - 1] && i < COUNT) i++; while(i < COUNT) { multipliers[j] = default_mult[i]; j++; i++; } fclose(fft); } return j; } int choose_fft_length (int q, int *index) { /* In order to increase length if an exponent has a round off issue, we use an extra paramter that we can adjust on the fly. In check(), index starts as -1, the default. In that case, choose from the table. If index >= 0, we must assume it's an override index and return the corresponding length. If index > table-count, then we assume it's a manual fftlen and return the proper index. 
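   In short: 0 < *index < fft_count selects an entry straight from the table;
   *index >= fft_count (or q == 0) is treated as an explicit fft length and is
   mapped back to the largest table entry not exceeding it; a negative *index
   means estimate a suitable length from the exponent q and pick the smallest
   table entry at least that large.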
*/ if( 0 < *index && *index < fft_count ) return 1024*multipliers[*index]; else if( *index >= fft_count || q == 0) { /* override with manual fftlen passed as arg; set pointer to largest index <= fftlen */ int len, i; for(i = fft_count - 1; i >= 0; i--) { len = 1024*multipliers[i]; if( len <= *index ) { *index = i; return len; /* not really necessary, but now we could decide to override fftlen with this value */ } } } else { // *index < 0, not override, choose length and set pointer to proper index int i; int estimate = ceil(1.01 * 0.0000358738168878758 * exp (1.0219834608 * log ((double) q))); for(i = 0; i < fft_count; i++) { if(multipliers[i] >= estimate) { *index = i; printf("Index %d\n",*index); return multipliers[i] * 1024; } } } return 0; } int fft_from_str(const char* str) /* This is really just strtoul with some extra magic to deal with K or M */ { char* endptr; const char* ptr = str; int len, mult = 0; while( *ptr ) { if( *ptr == 'k' || *ptr == 'K' ) { mult = 1024; break; } if( *ptr == 'm' || *ptr == 'M' ) { mult = 1024*1024; break; } ptr++; } if( !mult ) { // No K or M, treat as before (PS The Python else clause on loops I mention in parse.c would be useful here :) ) mult = 1; } len = (int) strtoul(str, &endptr, 10)*mult; if( endptr != ptr ) { // The K or M must directly follow the num (or the num must extend to the end of the str) fprintf (stderr, "can't parse fft length \"%s\"\n\n", str); exit (2); } return len; } //From apsen void print_time_from_seconds (int sec) { if (sec > 3600) { printf ("%d", sec / 3600); sec %= 3600; printf (":%02d", sec / 60); } else printf ("%d", sec / 60); sec %= 60; printf (":%02d", sec); } void init_device (int device_number) { int device_count = 0; hipGetDeviceCount (&device_count); if (device_number >= device_count) { printf ("device_number >= device_count ... 
exiting\n"); printf ("(This is probably a driver problem)\n\n"); exit (2); } hipSetDevice (device_number); hipSetDeviceFlags (hipDeviceScheduleBlockingSync); hipGetDeviceProperties (&dev, device_number); // From Iain if (dev.major == 1 && dev.minor < 3) { printf("A GPU with compute capability >= 1.3 is required for double precision arithmetic\n\n"); exit (2); } if (d_f) { printf ("------- DEVICE %d -------\n", device_number); printf ("name %s\n", dev.name); printf ("Compatibility %d.%d\n", dev.major, dev.minor); printf ("clockRate (MHz) %d\n", dev.clockRate/1000); printf ("memClockRate (MHz) %d\n", dev.memoryClockRate/1000); #ifdef _MSC_VER printf ("totalGlobalMem %Iu\n", dev.totalGlobalMem); #else printf ("totalGlobalMem %zu\n", dev.totalGlobalMem); #endif #ifdef _MSC_VER printf ("totalConstMem %Iu\n", dev.totalConstMem); #else printf ("totalConstMem %zu\n", dev.totalConstMem); #endif printf ("l2CacheSize %d\n", dev.l2CacheSize); #ifdef _MSC_VER printf ("sharedMemPerBlock %Iu\n", dev.sharedMemPerBlock); #else printf ("sharedMemPerBlock %zu\n", dev.sharedMemPerBlock); #endif printf ("regsPerBlock %d\n", dev.regsPerBlock); printf ("warpSize %d\n", dev.warpSize); #ifdef _MSC_VER printf ("memPitch %Iu\n", dev.memPitch); #else printf ("memPitch %zu\n", dev.memPitch); #endif printf ("maxThreadsPerBlock %d\n", dev.maxThreadsPerBlock); printf ("maxThreadsPerMP %d\n", dev.maxThreadsPerMultiProcessor); printf ("multiProcessorCount %d\n", dev.multiProcessorCount); printf ("maxThreadsDim[3] %d,%d,%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]); printf ("maxGridSize[3] %d,%d,%d\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]); #ifdef _MSC_VER printf ("textureAlignment %Iu\n", dev.textureAlignment); #else printf ("textureAlignment %zu\n", dev.textureAlignment); #endif printf ("deviceOverlap %d\n\n", dev.deviceOverlap); } } void rm_checkpoint (int q, int ks1) { char chkpnt_cfn[32]; char chkpnt_tfn[32]; if(!ks1) { sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); (void) unlink (chkpnt_cfn); (void) unlink (chkpnt_tfn); } sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); (void) unlink (chkpnt_cfn); (void) unlink (chkpnt_tfn); } int standardize_digits_int (int *x_int, int q, int n, int offset, int num_digits) { int j, digit, stop, qn = q / n, carry = 0; int temp; int lo = 1 << qn; int hi = lo << 1; digit = floor(offset * (n / (double) q)); j = (n + digit - 1) % n; while(x_int[j] == 0 && j != digit) j = (n + j - 1) % n; if(j == digit && x_int[digit] == 0) return(1); else if (x_int[j] < 0) carry = -1; { stop = (digit + num_digits) % n; j = digit; do { x_int[j] += carry; carry = 0; if (size[j]) temp = hi; else temp = lo; if(x_int[j] < 0) { x_int[j] += temp; carry = -1; } j = (j + 1) % n; } while(j != stop); } return(0); } void balance_digits_int(int* x, int q, int n) { int half_low = (1 << (q / n - 1)); int low = half_low << 1; int high = low << 1; int upper, adj, carry = 0; int j; for(j = 0; j < n; j++) { if(size[j]) { upper = low; adj = high; } else { upper = half_low; adj = low; } x[j] += carry; carry = 0; if(x[j] >= upper) { x[j] -= adj; carry = 1; } } x[0] += carry; // Good enough for our purposes. 
} unsigned * read_checkpoint_packed (int q) { //struct stat FileAttrib; FILE *fPtr; unsigned *x_packed; char chkpnt_cfn[32]; char chkpnt_tfn[32]; int end = (q + 31) / 32; x_packed = (unsigned *) malloc (sizeof (unsigned) * (end + 25)); sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); fPtr = fopen (chkpnt_cfn, "rb"); if (!fPtr) { //#ifndef _MSC_VER //if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the checkpoint file. Trying the backup file.\n"); //#endif } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose(fPtr); } else { fclose(fPtr); return x_packed; } fPtr = fopen(chkpnt_tfn, "rb"); if (!fPtr) { //#ifndef _MSC_VER // if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the backup file. Restarting test.\n"); //#endif } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose(fPtr); } else { fclose(fPtr); return x_packed; } x_packed[end] = q; x_packed[end + 1] = 0; // n x_packed[end + 2] = 1; // iteration number x_packed[end + 3] = 0; // stage x_packed[end + 4] = 0; // accumulated time x_packed[end + 5] = 0; // b1 // 6-9 reserved for extending b1 // 10-24 reserved for stage 2 int i; for(i = 6; i < 25; i++) x_packed[end + i] = 0; return x_packed; } int read_st2_checkpoint (int q, unsigned *x_packed) { //struct stat FileAttrib; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; int end = (q + 31) / 32; sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); fPtr = fopen (chkpnt_cfn, "rb"); if (!fPtr) { // if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the checkpoint file. Trying the backup file.\n"); } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose(fPtr); } else { fclose(fPtr); return 1; } fPtr = fopen(chkpnt_tfn, "rb"); if (!fPtr) { //if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the backup file. Restarting test.\n"); } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe backup appears to be corrupt. 
Restarting test.\n");; fclose(fPtr); } else { fclose(fPtr); return 1; } return 0; } void pack_bits_int(int *x_int, unsigned *packed_x, int q , int n) { unsigned long long temp1, temp2 = 0; int i, j = 0, k = 0; int qn = q / n; for(i = 0; i < n; i++) { temp1 = x_int[i]; temp2 += (temp1 << k); k += qn + size[i]; if(k >= 32) { packed_x[j] = (unsigned) temp2; temp2 >>= 32; k -= 32; j++; } } packed_x[j] = (unsigned) temp2; } void set_checkpoint_data(unsigned *x_packed, int q, int n, int j, int stage, int time) { int end = (q + 31) / 32; x_packed[end + 0] = q; x_packed[end + 1] = n; x_packed[end + 2] = j; x_packed[end + 3] = stage; x_packed[end + 4] = time; } void write_checkpoint_packed (unsigned *x_packed, int q) { int end = (q + 31) / 32; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); (void) unlink (chkpnt_tfn); (void) rename (chkpnt_cfn, chkpnt_tfn); fPtr = fopen (chkpnt_cfn, "wb"); if (!fPtr) { fprintf(stderr, "Couldn't write checkpoint.\n"); return; } fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); if (s_f > 0) // save all checkpoint files { char chkpnt_sfn[64]; #ifndef _MSC_VER sprintf (chkpnt_sfn, "%s/s" "%d.%d.%s", folder, q, x_packed[end + 2], s_residue); #else sprintf (chkpnt_sfn, "%s\\s" "%d.%d.%s.txt", folder, q, x_packed[end + 2], s_residue); #endif fPtr = fopen (chkpnt_sfn, "wb"); if (!fPtr) return; fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); } } void write_st2_checkpoint (unsigned *x_packed, int q) { int end = (q + 31) / 32; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); (void) unlink (chkpnt_tfn); (void) rename (chkpnt_cfn, chkpnt_tfn); fPtr = fopen (chkpnt_cfn, "wb"); if (!fPtr) { fprintf(stderr, "Couldn't write checkpoint.\n"); return; } fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); if (s_f > 0) // save all checkpoint files { char chkpnt_sfn[64]; #ifndef _MSC_VER sprintf (chkpnt_sfn, "%s/s" "%d.%d.%s", folder, q, x_packed[end + 2], s_residue); #else sprintf (chkpnt_sfn, "%s\\s" "%d.%d.%s.txt", folder, q, x_packed[end + 2], s_residue); #endif fPtr = fopen (chkpnt_sfn, "wb"); if (!fPtr) return; fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); } } int printbits_int (int *x_int, int q, int n, int offset, FILE* fp, char *expectedResidue, int o_f) { int j, k = 0; int digit, bit; unsigned long long temp, residue = 0; digit = floor(offset * (n / (double) q)); bit = offset - ceil(digit * (q / (double) n)); j = digit; while(k < 64) { temp = x_int[j]; residue = residue + (temp << k); k += q / n + size[j % n]; if(j == digit) { k -= bit; residue >>= bit; } j = (j + 1) % n; } sprintf (s_residue, "%016llx", residue); printf ("M%d, 0x%s,", q, s_residue); //if(o_f) printf(" offset = %d,", offset); printf (" n = %dK, %s", n/1024, program); if (fp) { fprintf (fp, "M%d, 0x%s,", q, s_residue); if(o_f) fprintf(fp, " offset = %d,", offset); fprintf (fp, " n = %dK, %s", n/1024, program); } return 0; } void unpack_bits_int(int *x_int, unsigned *packed_x, int q , int n) { unsigned long long temp1, temp2 = 0; int i, j = 0, k = 0; int qn = q / n; int mask1 = -1 << (qn + 1); int mask2; int mask; mask1 = ~mask1; mask2 = mask1 >> 1; for(i = 0; i < n; i++) { if(k < qn + size[i]) { temp1 = packed_x[j]; temp2 += (temp1 << k); k += 32; j++; } if(size[i]) mask = mask1; else mask = mask2; x_int[i] = ((int) temp2) & mask; temp2 >>= (qn + size[i]); k -= (qn 
+ size[i]); } } int* init_lucas_packed_int(unsigned * x_packed, int q , int *n, int *j, int *stage, int *total_time) { int *x; int new_n, old_n; int end = (q + 31) / 32; int new_test = 0; *n = x_packed[end + 1]; if(*n == 0) new_test = 1; *j = x_packed[end + 2]; *stage = x_packed[end + 3]; if(total_time) *total_time = x_packed[end + 4]; old_n = fftlen; if(fftlen == 0) fftlen = *n; new_n = choose_fft_length(q, &fftlen); if(old_n > fft_count) *n = old_n; else if (new_test || old_n) *n = new_n; init_threads(*n); printf("Using threads: norm1 %d, mult %d, norm2 %d.\n", threads1, threads2, threads3); if ((*n / (2 * threads1)) > dev.maxGridSize[0]) { fprintf (stderr, "over specifications Grid = %d\n", (int) *n / (2 * threads1)); fprintf (stderr, "try increasing norm1 threads (%d) or decreasing FFT length (%dK)\n\n", threads1, *n / 1024); return NULL; } if ((*n / (4 * threads2)) > dev.maxGridSize[0]) { fprintf (stderr, "over specifications Grid = %d\n", (int) *n / (4 * threads2)); fprintf (stderr, "try increasing mult threads (%d) or decreasing FFT length (%dK)\n\n", threads2, *n / 1024); return NULL; } if ((*n % (2 * threads1)) != 0) { fprintf (stderr, "fft length %d must be divisible by 2 * norm1 threads %d\n", *n, threads1); return NULL; } if ((*n % (4 * threads2)) != 0) { fprintf (stderr, "fft length %d must be divisible by 4 * mult threads %d\n", *n, threads2); return NULL; } if (q < *n * 0.8 * log((double) *n)) { fprintf (stderr, "The fft length %dK is too large for the exponent %d. Restart with smaller fft.\n", *n / 1024, q); return NULL; } x = (int *) malloc (sizeof (int) * *n); alloc_gpu_mem(*n); write_gpu_data(q, *n); if(!new_test) { unpack_bits_int(x, x_packed, q, *n); balance_digits_int(x, q, *n); } init_x_int(x, x_packed, q, *n, stage); hipLaunchKernelGGL(( apply_weights) , dim3(*n / (2 * threads1)), dim3(threads1), 0, 0, g_x, g_xint, g_ttmp); return x; } int isReasonable(int fft) { //From an idea of AXN's mentioned on the forums int i; while(!(fft & 1)) fft /= 2; for(i = 3; i <= 7; i += 2) while((fft % i) == 0) fft /= i; return (fft); } void threadbench (int n, int passes, int device_number) { float total[216] = {0.0f}, outerTime, maxerr = 0.5f; int threads[6] = {32, 64, 128, 256, 512, 1024}; int t1, t2, t3, i; float best_time = 10000.0f; int best_t1 = 0, best_t2 = 0, best_t3 = 0; int pass; hipEvent_t start, stop; printf("CUDA bench, testing various thread sizes for fft %dK, doing %d passes.\n", n, passes); fflush(NULL); n *= 1024; cutilSafeCall (hipMalloc ((void **) &g_x, sizeof (double) * n)); cutilSafeCall (hipMemset (g_x, 0, sizeof (double) * n)); cutilSafeCall (hipMalloc ((void **) &g_ttmp, sizeof (double) * n)); cutilSafeCall (hipMemset (g_ttmp, 0, sizeof (double) * n)); cutilSafeCall (hipMalloc ((void **) &g_ct, sizeof (double) * n / 4)); cutilSafeCall (hipMemset (g_ct, 0, sizeof (double) * n / 4)); cutilSafeCall (hipMalloc ((void **) &g_ttp1, sizeof (double) * n / 32)); cutilSafeCall (hipMalloc ((void **) &g_datai, sizeof (int) * n / 32)); cutilSafeCall (hipMalloc ((void **) &g_carryi, sizeof (int) * n / 64)); cutilSafeCall (hipMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (hipMemset (g_err, 0, sizeof (float))); cutilSafeCall (hipEventCreate (&start)); cutilSafeCall (hipEventCreateWithFlags (&stop, hipEventBlockingSync)); cufftSafeCall (hipfftPlan1d (&plan, n / 2, HIPFFT_Z2Z, 1)); for(t1 = 0; t1 < 6; t1++) { if(n / (2 * threads[t1]) <= dev.maxGridSize[0] && n % (2 * threads[t1]) == 0) { for (t2 = 0; t2 < 6; t2++) { if(n / (4 * threads[t2]) <= dev.maxGridSize[0] && 
n % (4 * threads[t2]) == 0) { for (t3 = 0; t3 < 6; t3++) { for(pass = 1; pass <= passes; pass++) { cutilSafeCall (hipEventRecord (start, 0)); for (i = 0; i < 50; i++) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( square) , dim3(n / (4 * threads[t2])), dim3(threads[t2]) , 0, 0, n, g_x, g_ct); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads[t1])), dim3(threads[t1]) , 0, 0, g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, maxerr, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads[t1]) + threads[t3] - 1) / threads[t3]), dim3(threads[t3]) , 0, 0, g_x, g_xint, n, threads[t1], g_datai, g_carryi, g_ttp1, 0); } cutilSafeCall (hipEventRecord (stop, 0)); cutilSafeCall (hipEventSynchronize (stop)); cutilSafeCall (hipEventElapsedTime (&outerTime, start, stop)); outerTime /= 50.0f; total[36 * t1 + 6 * t2 + t3] += outerTime; //if(outerTime > max_diff[i]) max_diff[i] = outerTime; } printf ("fft size = %dK, ave time = %2.4f msec, Norm1 threads %d, Mult threads %d, Norm2 threads %d\n", n / 1024 , total[36 * t1 + 6 * t2 + t3] / passes, threads[t1], threads[t2], threads[t3]); fflush(NULL); } } } } } for (i = 0; i < 216; i++) { if(total[i] < best_time && total[i] > 0.0f) { int j = i; best_time = total[i]; best_t3 = j % 6; j /= 6; best_t2 = j % 6; best_t1 = j / 6; } } printf("\nBest time for fft = %dK, time: %2.4f, t1 = %d, t2 = %d, t3 = %d\n", n/1024, best_time / passes, threads[best_t1], threads[best_t2], threads[best_t3]); cufftSafeCall (hipfftDestroy (plan)); cutilSafeCall (hipFree ((char *) g_x)); cutilSafeCall (hipFree ((char *) g_ttmp)); cutilSafeCall (hipFree ((char *) g_ttp1)); cutilSafeCall (hipFree ((char *) g_ct)); cutilSafeCall (hipFree ((char *) g_datai)); cutilSafeCall (hipFree ((char *) g_carryi)); cutilSafeCall (hipFree ((char *) g_err)); cutilSafeCall (hipEventDestroy (start)); cutilSafeCall (hipEventDestroy (stop)); char threadfile[32]; sprintf (threadfile, "%s threads.txt", dev.name); FILE *fptr; fptr = fopen(threadfile, "a+"); if(!fptr) printf("Can't open %s threads.txt\n", dev.name); else fprintf(fptr, "%5d %4d %4d %4d %8.4f\n", n / 1024, threads[best_t1], threads[best_t2], threads[best_t3], best_time / passes); } int isprime(unsigned int n) { unsigned int i; if(n<=1) return 0; if(n>2 && n%2==0)return 0; i=3; while(i*i <= n && i < 0x10000) { if(n%i==0)return 0; i+=2; } return 1; } void cufftbench (int cufftbench_s, int cufftbench_e, int passes, int device_number) { //if(cufftbench_s % 2) cufftbench_s++; hipEvent_t start, stop; float outerTime; int i, j, k; int end = cufftbench_e - cufftbench_s + 1; float best_time; float *total, *max_diff, maxerr = 0.5f; int threads[] = {32, 64, 128, 256, 512, 1024}; int t1 = 3, t2 = 2, t3 = 2; if(end == 1) { threadbench(cufftbench_e, passes, device_number); return; } printf ("CUDA bench, testing reasonable fft sizes %dK to %dK, doing %d passes.\n", cufftbench_s, cufftbench_e, passes); total = (float *) malloc (sizeof (float) * end); max_diff = (float *) malloc (sizeof (float) * end); for(i = 0; i < end; i++) { total[i] = max_diff[i] = 0.0f; } cutilSafeCall (hipMalloc ((void **) &g_x, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (hipMemset (g_x, 0, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_ttmp, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (hipMemset (g_ttmp, 0, sizeof (double) * 1024 * 
cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_ct, sizeof (double) * 256 * cufftbench_e)); cutilSafeCall (hipMemset (g_ct, 0, sizeof (double) * 256 * cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_ttp1, sizeof (double) * 1024 / 32 * cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_datai, sizeof (int) * 1024 / 32 * cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_carryi, sizeof (int) * 512 / 32 * cufftbench_e)); cutilSafeCall (hipMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (hipMemset (g_err, 0, sizeof (float))); cutilSafeCall (hipEventCreate (&start)); cutilSafeCall (hipEventCreateWithFlags (&stop, hipEventBlockingSync)); for (j = cufftbench_s; j <= cufftbench_e; j++) { if(isReasonable(j) < 15) { int n = j * 1024; cufftSafeCall (hipfftPlan1d (&plan, n / 2, HIPFFT_Z2Z, 1)); for(k = 0; k < passes; k++) { cutilSafeCall (hipEventRecord (start, 0)); for (i = 0; i < 50; i++) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( square) , dim3(n / (4 * threads[t2])), dim3(threads[t2]) , 0, 0, n, g_x, g_ct); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) g_x, (hipfftDoubleComplex *) g_x, HIPFFT_BACKWARD)); hipLaunchKernelGGL(( norm1a) , dim3(n / (2 * threads[t1])), dim3(threads[t1]) , 0, 0, g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, maxerr, 0); hipLaunchKernelGGL(( norm2a) , dim3((n / (2 * threads[t1]) + threads[t3] - 1) / threads[t3]), dim3(threads[t3]) , 0, 0, g_x, g_xint, n, threads[t1], g_datai, g_carryi, g_ttp1, 0); } cutilSafeCall (hipEventRecord (stop, 0)); cutilSafeCall (hipEventSynchronize (stop)); cutilSafeCall (hipEventElapsedTime (&outerTime, start, stop)); i = j - cufftbench_s; outerTime /= 50.0f; total[i] += outerTime; if(outerTime > max_diff[i]) max_diff[i] = outerTime; } cufftSafeCall (hipfftDestroy (plan)); printf ("fft size = %dK, ave time = %2.4f msec, max-ave = %0.5f\n", j , total[i] / passes, max_diff[i] - total[i] / passes); fflush(NULL); } } cutilSafeCall (hipFree ((char *) g_x)); cutilSafeCall (hipFree ((char *) g_ttmp)); cutilSafeCall (hipFree ((char *) g_ttp1)); cutilSafeCall (hipFree ((char *) g_ct)); cutilSafeCall (hipFree ((char *) g_datai)); cutilSafeCall (hipFree ((char *) g_carryi)); cutilSafeCall (hipFree ((char *) g_err)); cutilSafeCall (hipEventDestroy (start)); cutilSafeCall (hipEventDestroy (stop)); i = end - 1; best_time = 10000.0f; while(i >= 0) { if(total[i] > 0.0f && total[i] < best_time) best_time = total[i]; else total[i] = 0.0f; i--; } char fftfile[32]; FILE *fptr; sprintf (fftfile, "%s fft.txt", dev.name); fptr = fopen(fftfile, "w"); if(!fptr) { printf("Cannot open %s.\n",fftfile); printf ("Device %s\n", dev.name); printf ("Compatibility %d.%d\n", dev.major, dev.minor); printf ("clockRate (MHz) %d\n", dev.clockRate/1000); printf ("memClockRate (MHz) %d\n", dev.memoryClockRate/1000); printf("\n fft max exp ms/iter\n"); for(i = 0; i < end; i++) { if(total[i] > 0.0f) { int tl = (int) (exp(0.9784876919 * log ((double)cufftbench_s + i)) * 22366.92473079 / 1.01); if(tl % 2 == 0) tl -= 1; while(!isprime(tl)) tl -= 2; printf("%5d %10d %8.4f\n", cufftbench_s + i, tl, total[i] / passes); } } fflush(NULL); } else { fprintf (fptr, "Device %s\n", dev.name); fprintf (fptr, "Compatibility %d.%d\n", dev.major, dev.minor); fprintf (fptr, "clockRate (MHz) %d\n", dev.clockRate/1000); fprintf (fptr, "memClockRate (MHz) %d\n", dev.memoryClockRate/1000); fprintf(fptr, "\n fft max exp ms/iter\n"); for(i = 0; i < end; i++) { if(total[i] > 
0.0f) { int tl = (int) (exp(0.9784876919 * log ((double)cufftbench_s + i)) * 22366.92473079 / 1.01); if(tl % 2 == 0) tl -= 1; while(!isprime(tl)) tl -= 2; fprintf(fptr, "%5d %10d %8.4f\n", cufftbench_s + i, tl, total[i] / passes); } } fclose(fptr); printf("Optimal fft lengths saved in %s.\nPlease email a copy to [email protected].\n", fftfile); fflush(NULL); } free ((char *) total); free ((char *) max_diff); } void SetQuitting (int sig) { quitting = 1; sig==SIGINT ? printf( "\tSIGINT") : (sig==SIGTERM ? printf( "\tSIGTERM") : printf( "\tUnknown signal")) ; printf( " caught, writing checkpoint.\n"); } #ifndef _MSC_VER #include <termios.h> #include <unistd.h> #include <fcntl.h> int _kbhit (void) { struct termios oldt, newt; int ch; int oldf; tcgetattr (STDIN_FILENO, &oldt); newt = oldt; newt.c_lflag &= ~(ICANON | ECHO); tcsetattr (STDIN_FILENO, TCSANOW, &newt); oldf = fcntl (STDIN_FILENO, F_GETFL, 0); fcntl (STDIN_FILENO, F_SETFL, oldf | O_NONBLOCK); ch = getchar (); tcsetattr (STDIN_FILENO, TCSANOW, &oldt); fcntl (STDIN_FILENO, F_SETFL, oldf); if (ch != EOF) { ungetc (ch, stdin); return 1; } return 0; } #else #include <conio.h> #endif int interact(void); // defined below everything else int get_bit(int location, unsigned *control) { int digit = location / 32; int bit = location % 32; bit = 1 << bit; bit = control[digit] & bit; if(bit) bit /= bit; return(bit); } int round_off_test(int q, int n, int *j, unsigned *control, int last) { int k; float totalerr = 0.0; float terr, avgerr, maxerr = 0.0; float max_err = 0.0, max_err1 = 0.0; int bit; printf("Running careful round off test for 1000 iterations. If average error > 0.25, the test will restart with a longer FFT.\n"); for (k = 0; k < 1000 && k < last; k++) { bit = get_bit(last - k - 1, control); terr = lucas_square (q, n, k, last, &maxerr, 1, bit, 1, k == 999); if(terr > maxerr) maxerr = terr; if(terr > max_err) max_err = terr; if(terr > max_err1) max_err1 = terr; totalerr += terr; reset_err(&maxerr, 0.85); if(terr >= 0.35) { printf ("Iteration = %d < 1000 && err = %5.5f >= 0.35, increasing n from %dK\n", k, terr, n/1024); fftlen++; return 1; } if(k && (k % 100 == 0)) { printf( "Iteration %d, average error = %5.5f, max error = %5.5f\n", k, totalerr / k, max_err); max_err = 0.0; } } avgerr = totalerr/1000.0; if( avgerr > 0.25) { printf("Iteration 1000, average error = %5.5f > 0.25 (max error = %5.5f), increasing FFT length and restarting\n", avgerr, max_err); fftlen++; return 1; } else if( avgerr < 0 ) { fprintf(stderr, "Something's gone terribly wrong! 
Avgerr = %5.5f < 0 !\n", avgerr); exit (2); } else { printf("Iteration 1000, average error = %5.5f <= 0.25 (max error = %5.5f), continuing test.\n", avgerr, max_err1); reset_err(&maxerr, 0.85); } *j += 1000; return 0; } unsigned *get_control(int *j, int lim1, int lim2, int q) { mpz_t result; int p = 2; int limit; int prime_power = 1; unsigned *control = NULL; mpz_init(result); if(lim2 == 0) { mpz_set_ui (result, 2 * q); limit = lim1 / p; while (prime_power <= limit) prime_power *= p; mpz_mul_ui(result, result, prime_power); p = 3; while (p <= lim1) { while(p <= lim1 && !isprime(p)) p += 2; limit = lim1 / p; prime_power = p; while (prime_power <= limit) prime_power *= p; mpz_mul_ui(result, result, prime_power); p += 2; } } else { p = lim1; if(!(lim1 & 1)) p++; mpz_set_ui (result, 1); while (p <= lim2) { while(p <= lim2 && !isprime(p)) p += 2; mpz_mul_ui(result, result, p); printf("prime_power: %d, %d\n", prime_power, p); p += 2; } } *j = mpz_sizeinbase (result, 2); control = (unsigned *) malloc (sizeof (unsigned) * ((*j + 31) / 32)); mpz_export (control, NULL, -1, 4, 0, 0, result); mpz_clear (result); return control; } int get_gcd(unsigned *x_packed, int q, int n, int stage) { mpz_t result, prime, prime1; int end = (q + 31) / 32; int rv = 0; mpz_init2( result, q); mpz_init2( prime, q); mpz_init2( prime1, q); mpz_import (result, end, -1, sizeof(x_packed[0]), 0, 0, x_packed); if(stage == 1) mpz_sub_ui (result, result, 1); mpz_setbit (prime, q); mpz_sub_ui (prime, prime, 1); if (mpz_cmp_ui (result, 0)) { mpz_gcd (prime1, prime, result); if (mpz_cmp_ui (prime1, 1)) { rv = 1; printf( "M%d has a factor: ", q); mpz_out_str (stdout, 10, prime1); if (stage==1) printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,b1,g_e,n/1024, program); // Found in stage 1 else printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); FILE* fp = fopen_and_lock(RESULTSFILE, "a"); fprintf (fp, "M%d has a factor: ", q); mpz_out_str (fp, 10, prime1); if (AID[0] && strncasecmp(AID, "N/A", 3)) { if (stage==1) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,b1,g_e,n/1024, AID, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,g_b2,g_e,n/1024, AID, program); } else { if (stage==1) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,b1,g_e,n/1024, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); } unlock_and_fclose(fp); } } if (rv == 0) { printf( "M%d Stage %d found no factor", q, stage); printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); if (stage==2) { FILE* fp = fopen_and_lock(RESULTSFILE, "a"); fprintf (fp, "M%d found no factor", q); if (AID[0] && strncasecmp(AID, "N/A", 3)) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,g_b2,g_e,n/1024, AID, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); unlock_and_fclose(fp); } } mpz_clear (result); mpz_clear (prime); mpz_clear (prime1); return rv; } /**************************************************************/ /* Routines to compute optimal and test to optimal P-1 bounds */ /* Stolen from Woltman's Prime95 and adapted to CUDAPm1 */ /**************************************************************/ /* This table gives the values of Dickman's function given an input */ /* between 0.000 and 0.500. These values came from a different program */ /* that did a numerical integration. 
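   The table is indexed in steps of 0.001, i.e. savedF[i] approximates F(i/1000),
   and F() below interpolates linearly between adjacent entries; F(x) itself is,
   in Knuth's notation, the probability that a random integer n has no prime
   factor exceeding n^x.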
*/ static double savedF[501] = { 0, 0, 0, 0, 0, 0, 3.3513e-215, 5.63754e-208, 4.00865e-201, 1.65407e-194, 4.53598e-188, 8.93587e-182, 1.33115e-175, 1.55557e-169, 1.46609e-163, 1.13896e-157, 7.42296e-152, 3.80812e-146, 1.56963e-140, 5.32886e-135, 1.51923e-129, 3.69424e-124, 7.76066e-119, 1.42371e-113, 2.30187e-108, 3.30619e-103, 4.24793e-098, 4.80671e-093, 4.78516e-088, 4.22768e-083, 3.33979e-078, 2.37455e-073, 1.52822e-068, 8.94846e-064, 4.78909e-059, 4.65696e-057, 4.49802e-055, 4.31695e-053, 4.07311e-051, 3.81596e-049, 3.61043e-047, 1.73046e-045, 8.26375e-044, 3.9325e-042, 1.86471e-040, 8.8102e-039, 4.14402e-037, 1.99497e-035, 1.83001e-034, 1.59023e-033, 1.45505e-032, 1.24603e-031, 1.15674e-030, 9.70832e-030, 9.23876e-029, 4.20763e-028, 4.24611e-027, 1.61371e-026, 6.59556e-026, 3.17069e-025, 1.12205e-024, 4.65874e-024, 2.01267e-023, 6.2941e-023, 3.02604e-022, 7.84622e-022, 2.3526e-021, 6.7049e-021, 1.88634e-020, 4.59378e-020, 1.37233e-019, 4.00682e-019, 8.34209e-019, 2.21612e-018, 4.84252e-018, 1.02457e-017, 2.03289e-017, 4.07704e-017, 1.33778e-016, 2.4263e-016, 4.14981e-016, 7.0383e-016, 1.20511e-015, 3.85644e-015, 6.52861e-015, 1.06563e-014, 1.67897e-014, 2.79916e-014, 4.54319e-014, 9.83296e-014, 1.66278e-013, 2.61858e-013, 4.03872e-013, 5.98967e-013, 1.09674e-012, 1.70553e-012, 2.56573e-012, 3.72723e-012, 6.14029e-012, 9.33636e-012, 1.36469e-011, 1.89881e-011, 2.68391e-011, 4.12016e-011, 5.94394e-011, 8.43746e-011, 1.12903e-010, 1.66987e-010, 2.36959e-010, 3.11726e-010, 4.28713e-010, 5.90781e-010, 7.79892e-010, 1.05264e-009, 1.4016e-009, 1.87506e-009, 2.42521e-009, 3.14508e-009, 4.38605e-009, 5.43307e-009, 6.96737e-009, 8.84136e-009, 1.16286e-008, 1.42343e-008, 1.79697e-008, 2.30867e-008, 2.88832e-008, 3.52583e-008, 4.31032e-008, 5.46444e-008, 6.66625e-008, 8.06132e-008, 1.00085e-007, 1.20952e-007, 1.4816e-007, 1.80608e-007, 2.13125e-007, 2.5324e-007, 3.094e-007, 3.64545e-007, 4.31692e-007, 5.19078e-007, 6.03409e-007, 7.21811e-007, 8.53856e-007, 9.71749e-007, 1.13949e-006, 1.37042e-006, 1.53831e-006, 1.79066e-006, 2.15143e-006, 2.40216e-006, 2.76872e-006, 3.20825e-006, 3.61263e-006, 4.21315e-006, 4.76404e-006, 5.43261e-006, 6.2041e-006, 6.96243e-006, 7.94979e-006, 8.89079e-006, 1.01387e-005, 1.13376e-005, 1.2901e-005, 1.44183e-005, 1.59912e-005, 1.79752e-005, 1.99171e-005, 2.22665e-005, 2.47802e-005, 2.7678e-005, 3.0492e-005, 3.34189e-005, 3.71902e-005, 4.12605e-005, 4.54706e-005, 4.98411e-005, 5.48979e-005, 6.06015e-005, 6.61278e-005, 7.22258e-005, 7.97193e-005, 8.66574e-005, 9.48075e-005, 0.00010321, 0.000112479, 0.000121776, 0.000133344, 0.000144023, 0.000156667, 0.000168318, 0.000183192, 0.000196527, 0.00021395, 0.000228389, 0.000249223, 0.000264372, 0.000289384, 0.000305707, 0.000333992, 0.000353287, 0.000379868, 0.000408274, 0.00043638, 0.000465319, 0.000496504, 0.000530376, 0.000566008, 0.000602621, 0.000642286, 0.000684543, 0.000723853, 0.000772655, 0.000819418, 0.000868533, 0.000920399, 0.000975529, 0.00103188, 0.00109478, 0.00115777, 0.00122087, 0.00128857, 0.00136288, 0.00143557, 0.00151714, 0.00159747, 0.00167572, 0.00176556, 0.00186199, 0.00195063, 0.00205239, 0.00216102, 0.00225698, 0.00236962, 0.00249145, 0.00259636, 0.00272455, 0.00287006, 0.00297545, 0.00312346, 0.0032634, 0.00340298, 0.00355827, 0.00371195, 0.00387288, 0.00404725, 0.00420016, 0.00439746, 0.00456332, 0.00475936, 0.00495702, 0.00514683, 0.00535284, 0.00557904, 0.00578084, 0.00601028, 0.00623082, 0.00647765, 0.00673499, 0.00696553, 0.00722529, 0.00748878, 0.00775537, 0.00803271, 0.00832199, 0.00861612, 
0.00889863, 0.00919876, 0.00953343, 0.00985465, 0.0101993, 0.0105042, 0.0108325, 0.0112019, 0.0115901, 0.0119295, 0.0123009, 0.0127191, 0.0130652, 0.0134855, 0.0139187, 0.0142929, 0.0147541, 0.0151354, 0.0156087, 0.0160572, 0.0165382, 0.0169669, 0.0174693, 0.017946, 0.0184202, 0.0189555, 0.0194336, 0.0200107, 0.0204863, 0.0210242, 0.0216053, 0.0221361, 0.0226858, 0.0232693, 0.0239027, 0.0244779, 0.025081, 0.0257169, 0.0263059, 0.0269213, 0.0275533, 0.0282065, 0.0289028, 0.029567, 0.0302268, 0.0309193, 0.0316619, 0.0323147, 0.0330398, 0.0338124, 0.0345267, 0.0353038, 0.0360947, 0.0368288, 0.0376202, 0.0383784, 0.0391894, 0.0399684, 0.0408148, 0.0416403, 0.042545, 0.0433662, 0.0442498, 0.0451003, 0.046035, 0.0468801, 0.0478059, 0.0487442, 0.0496647, 0.0505752, 0.0515123, 0.0524792, 0.0534474, 0.0544682, 0.0554579, 0.0565024, 0.0574619, 0.0584757, 0.0595123, 0.0605988, 0.0615874, 0.062719, 0.0637876, 0.064883, 0.0659551, 0.0670567, 0.0681256, 0.0692764, 0.0704584, 0.0715399, 0.0727237, 0.0738803, 0.0750377, 0.0762275, 0.0773855, 0.0785934, 0.0797802, 0.0810061, 0.0822205, 0.0834827, 0.084714, 0.0858734, 0.0871999, 0.0884137, 0.0896948, 0.090982, 0.0922797, 0.093635, 0.0948243, 0.0961283, 0.0974718, 0.0988291, 0.100097, 0.101433, 0.102847, 0.104222, 0.105492, 0.106885, 0.10833, 0.109672, 0.111048, 0.112438, 0.113857, 0.115311, 0.11673, 0.118133, 0.119519, 0.12099, 0.122452, 0.123905, 0.125445, 0.126852, 0.128326, 0.129793, 0.131277, 0.132817, 0.134305, 0.135772, 0.137284, 0.138882, 0.140372, 0.14192, 0.143445, 0.14494, 0.146515, 0.148145, 0.149653, 0.151199, 0.152879, 0.154368, 0.155958, 0.157674, 0.159211, 0.160787, 0.16241, 0.164043, 0.165693, 0.167281, 0.168956, 0.170589, 0.172252, 0.173884, 0.175575, 0.177208, 0.178873, 0.180599, 0.18224, 0.183975, 0.185654, 0.187363, 0.189106, 0.190729, 0.19252, 0.194158, 0.195879, 0.197697, 0.199391, 0.201164, 0.202879, 0.204602, 0.206413, 0.20818, 0.209911, 0.211753, 0.213484, 0.215263, 0.21705, 0.218869, 0.220677, 0.222384, 0.224253, 0.226071, 0.227886, 0.229726, 0.231529, 0.233373, 0.235234, 0.237081, 0.238853, 0.240735, 0.242606, 0.244465, 0.246371, 0.248218, 0.250135, 0.251944, 0.253836, 0.255708, 0.257578, 0.259568, 0.261424, 0.263308, 0.265313, 0.26716, 0.269073, 0.271046, 0.272921, 0.274841, 0.276819, 0.278735, 0.280616, 0.282653, 0.284613, 0.286558, 0.288478, 0.290472, 0.292474, 0.294459, 0.296379, 0.298382, 0.300357, 0.302378, 0.30434, 0.306853 }; /* This evaluates Dickman's function for any value. See Knuth vol. 2 */ /* for a description of this function and its use. */ double F (double x) { int i; if (x >= 1.0) return (1.0); if (x >= 0.5) return (1.0 + log (x)); i = (int) (x * 1000.0); return (savedF[i] + (x * 1000.0 - i) * (savedF[i+1] - savedF[i])); } /* Analyze how well P-1 factoring will perform */ void guess_pminus1_bounds ( int guess_exp, /* N in K*B^N+C. Exponent to test. */ int how_far_factored, /* Bit depth of trial factoring */ int tests_saved, /* 1 if doublecheck, 2 if first test */ int vals, int *bound1, int *bound2, double *success_rate) { int guess_B1, guess_B2, /*vals,*/ i; double h, pass1_squarings, pass2_squarings; double logB1, logB2, kk, logkk, temp, logtemp, log2; double prob, gcd_cost, ll_tests, numprimes; struct { int B1; int B2; double prob; double pass1_squarings; double pass2_squarings; } best[2]; for (i=0; i<2; i++) { best[i].B1=0; best[i].B2=0; best[i].prob=0; best[i].pass1_squarings=0; best[i].pass2_squarings=0; } /* Guard against wild tests_saved values. 
Huge values will cause this routine */ /* to run for a very long time. This shouldn't happen as auxiliaryWorkUnitInit */ /* now has the exact same test. */ if (tests_saved > 10) tests_saved = 10; /* Balance P-1 against 1 or 2 LL tests (actually more since we get a */ /* corrupt result reported some of the time). */ ll_tests = (double) tests_saved + 2 * 0.018; /* Precompute the cost of a GCD. We used Excel to come up with the */ /* formula GCD is equivalent to 861 * Ln (p) - 7775 transforms. */ /* Since one squaring equals two transforms we get the formula below. */ /* NOTE: In version 22, the GCD speed has approximately doubled. I've */ /* adjusted the formula accordingly. */ gcd_cost = (430.5 * log ((double) guess_exp) - 3887.5) / 2.0; if (gcd_cost < 50.0) gcd_cost = 50.0; /* Compute how many temporaries we can use given our memory constraints. */ /* Allow 1MB for code and data structures. */ // vals = cvt_mem_to_estimated_gwnums (max_mem (thread_num), k, b, n, c); // if (vals < 1) vals = 1; //vals = 176; /* Find the best B1 */ log2 = log ((double) 2.0); for (guess_B1 = 10000; ; guess_B1 += 5000) { /* Constants */ logB1 = log ((double) guess_B1); /* Compute how many squarings will be required in pass 1 */ pass1_squarings = ceil (1.44 * guess_B1); /* Try a lot of B2 values */ for (guess_B2 = guess_B1; guess_B2 <= guess_B1 * 100; guess_B2 += guess_B1 >> 2) { /* Compute how many squarings will be required in pass 2. In the */ /* low-memory cases, assume choose_pminus1_plan will pick D = 210, E = 1 */ /* If more memory is available assume choose_pminus1_plan will pick */ /* D = 2310, E = 2. This will provide an accurate enough cost for our */ /* purposes even if different D and E values are picked. See */ /* choose_pminus1_plan for a description of the costs of P-1 stage 2. */ /* For cudapm1, we're not set up for e = 1, assume e = 2 in both cases*/ logB2 = log ((double) guess_B2); numprimes = (unsigned long) (guess_B2 / (logB2 - 1.0) - guess_B1 / (logB1 - 1.0)); if (guess_B2 <= guess_B1) { pass2_squarings = 0.0; } else if (vals <= 8) { /* D = 210, E = 1, passes = 48/temps */ unsigned long num_passes; num_passes = (unsigned long) ceil (48.0 / (vals - 3)); pass2_squarings = ceil ((guess_B2 - guess_B1) / 210.0) * num_passes; pass2_squarings += numprimes * 1.1; } else { unsigned long num_passes; double numpairings; num_passes = (unsigned long) ceil (480.0 / (vals - 3)); numpairings = (unsigned long) (numprimes / 2.0 * numprimes / ((guess_B2-guess_B1) * 480.0/2310.0)); pass2_squarings = 2400.0 + num_passes * 90.0; /* setup costs */ pass2_squarings += ceil ((guess_B2-guess_B1) / 4620.0) * 2.0 * num_passes; /*number of base changes per pass * e with e = 2*/ pass2_squarings += numprimes - numpairings; /*these are the sub_mul operations*/ } /* Pass 2 FFT multiplications seem to be at least 20% slower than */ /* the squarings in pass 1. This is probably due to several factors. */ /* These include: better L2 cache usage and no calls to the faster */ /* gwsquare routine. Nov, 2009: On my Macbook Pro, with exponents */ /* around 45M and using 800MB memory, pass2 squarings are 40% slower. */ /* Owftheevil reports that CUDA squarings are only about 2% slower. */ /* New normaliztion kernels benefit stage 1 more than stage 2, back to 9% */ pass2_squarings *= 1.09; // was 1.35 /* What is the "average" value that must be smooth for P-1 to succeed? */ /* Ordinarily this is 1.5 * 2^how_far_factored. However, for Mersenne */ /* numbers the factor must be of the form 2kp+1. 
Consequently, the */ /* value that must be smooth (k) is much smaller. */ kk = 1.5 * pow (2.0, how_far_factored); // if (k == 1.0 && b == 2 && c == -1) kk = kk / 2.0 / guess_exp; logkk = log (kk); /* Set temp to the number that will need B1 smooth if k has an */ /* average-sized factor found in stage 2 */ temp = kk / ((guess_B1 + guess_B2) / 2); logtemp = log (temp); /* Loop over increasing bit lengths for the factor */ prob = 0.0; for (h = how_far_factored; ; ) { double prob1, prob2; /* If temp < 1.0, then there are no factor to find in this bit level */ if (logtemp > 0.0) { /* See how many smooth k's we should find using B1 */ /* Using Dickman's function (see Knuth pg 382-383) we want k^a <= B1 */ prob1 = F (logB1 / logkk); /* See how many smooth k's we should find using B2 */ /* Adjust this slightly to eliminate k's that have two primes > B1 and < B2 */ /* Do this by assuming the largest factor is the average of B1 and B2 */ /* and the remaining cofactor is B1 smooth */ prob2 = prob1 + (F (logB2 / logkk) - prob1) * (F (logB1 / logtemp) / F (logB2 / logtemp)); if (prob2 < 0.0001) break; /* Add this data in to the total chance of finding a factor */ prob += prob2 / (h + 0.5); } /* Move to next bit level */ h += 1.0; logkk += log2; logtemp += log2; } /* See if this is a new best case scenario */ if (guess_B2 == guess_B1 || prob * ll_tests * guess_exp - pass2_squarings > best[0].prob * ll_tests * guess_exp - best[0].pass2_squarings){ best[0].B2 = guess_B2; best[0].prob = prob; best[0].pass2_squarings = pass2_squarings; if (vals < 4) break; continue; } if (prob * ll_tests * guess_exp - pass2_squarings < 0.9 * (best[0].prob * ll_tests * guess_exp - best[0].pass2_squarings)) break; continue; } /* Is this the best B1 thusfar? */ if (guess_B1 == 10000 || best[0].prob * ll_tests * guess_exp - (pass1_squarings + best[0].pass2_squarings) > best[1].prob * ll_tests * guess_exp - (best[1].pass1_squarings + best[1].pass2_squarings)) { best[1].B1 = guess_B1; best[1].B2 = best[0].B2; best[1].prob = best[0].prob; best[1].pass1_squarings = pass1_squarings; best[1].pass2_squarings = best[0].pass2_squarings; continue; } if (best[0].prob * ll_tests * guess_exp - (pass1_squarings + best[0].pass2_squarings) < 0.9 * (best[1].prob * ll_tests * guess_exp - (best[1].pass1_squarings + best[1].pass2_squarings))) break; continue; } /* Return the final best choice */ if (best[1].prob * ll_tests * guess_exp > best[1].pass1_squarings + best[1].pass2_squarings + gcd_cost) { *bound1 = best[1].B1; *bound2 = best[1].B2; // *squarings = (unsigned long) // (best[1].pass1_squarings + // best[1].pass2_squarings + gcd_cost); *success_rate = best[1].prob; } else { *bound1 = 0; *bound2 = 0; // *squarings = 0; *success_rate = 0.0; } } /************************************************************** * * Main Function * **************************************************************/ int stage2_init_param3(int e, int n, int trans, float *err) { int j, i, k = 0, base; mpz_t exponent; long long b[7]; for(i = 0; i <= e/2; i++) { base = 2 * i + 1; b[i] = 1; for(j = 0; j < e / 2; j++) b[i] *= base; b[i] *= b[i]; } for(i = e/2; i > 0; i--) { while (k < i) { j = i; while(j > k) { b[j] = b[j] - b[j-1]; j--; } k++; j = i; while(j > k) { b[j] = b[j] - b[j-1]; j--; } } } mpz_init(exponent); for(i = 0; i <= e / 2; i++) { mpz_set_ui (exponent, b[i]); trans = E_to_the_p(&e_data[2 * i * n], g_x, exponent, n, trans, err); if(i > 0) { cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) &e_data[2 * i * n], (hipfftDoubleComplex *) &e_data[2 * i * 
n], HIPFFT_BACKWARD)); hipLaunchKernelGGL(( copy_kernel), dim3(n / (2*threads1)), dim3(threads1), 0, 0, &e_data[(2 * i - 1) * n], &e_data[2 * i * n]); trans++; } } E_pre_mul(&e_data[e * n], &e_data[e * n], n, 0); E_pre_mul(&e_data[0], &e_data[0], n, 1); trans++; mpz_clear(exponent); return trans; } int next_base1(int k, int e, int n, int trans, float *err) { int j; if(k == 1) return(stage2_init_param3(e, n, trans, err)); if(k > 3) { if(k <= e + 1) { E_mul(&e_data[(k - 3) * n], &e_data[(k - 2) * n], &e_data[(k - 3) * n], n, *err, 0); j = e + 3 - k; trans += 2 * (k - 3); } else { E_half_mul(&e_data[(e-1) * n], &e_data[(e-1) * n], &e_data[e * n], n, *err); j = 1; trans += 2 * ( e - 1); } for(; j < e-1; j++) E_mul(&e_data[(e-j-1) * n], &e_data[(e-j) * n], &e_data[(e-j-1) * n], n, *err, 1); cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) &e_data[1 * n], (hipfftDoubleComplex *) &e_data[1 * n], HIPFFT_BACKWARD)); } E_half_mul(&e_data[0], &e_data[1 * n], &e_data[0], n, *err); E_pre_mul(&e_data[0], &e_data[0], n, 1); trans += 2; return trans; } int stage2_init_param1(int k, int base, int e, int n, int trans, float *err) { int i, j; if(base > 1) { mpz_t exponent; mpz_init(exponent); mpz_ui_pow_ui (exponent, base, e); trans = E_to_the_p(&e_data[0], g_x, exponent, n, trans, err); E_pre_mul(g_x, &e_data[0], n, 1); trans++; mpz_clear(exponent); } if(k < 2 * e) for(j = 1; j <= k; j += 2) { trans = next_base1(j, e, n, trans, err); cutilSafeThreadSync(); } else { mpz_t *exponents; exponents = (mpz_t *) malloc((e + 1) * sizeof(mpz_t)); for(j = 0; j <= e; j++) mpz_init(exponents[j]); for(j = e; j >= 0; j--) mpz_ui_pow_ui (exponents[j], (k - j * 2), e); for(j = 0; j < e; j++) for(i = e; i > j; i--) mpz_sub(exponents[i], exponents[i-1], exponents[i]); for(j = 0; j <= e; j++) trans = E_to_the_p(&e_data[j * n], g_x, exponents[j], n, trans, err); for(j = 0; j <= e; j++) mpz_clear(exponents[j]); E_pre_mul(&e_data[0], &e_data[0], n, 1); E_pre_mul(&e_data[e * n], &e_data[e * n], n, 1); for(j = 1; j < e; j++) cufftSafeCall (hipfftExecZ2Z (plan, (hipfftDoubleComplex *) &e_data[j * n], (hipfftDoubleComplex *) &e_data[j * n], HIPFFT_BACKWARD)); trans += e + 1; } return trans; } int stage2_init_param2(int num, int cur_rp, int base, int e, int n, uint8 *gaps, int trans, float *err) { int rp = 1, j = 0, i; mpz_t exponent; mpz_init(exponent); while(j < cur_rp) { j++; rp += 2 * gaps[j]; } for(i = 0; i < num; i++) { mpz_ui_pow_ui (exponent, rp, e); trans = E_to_the_p(&rp_data[i * n], g_x, exponent, n, trans, err); E_pre_mul(&rp_data[i * n], &rp_data[i * n], n, 1); trans++; j++; if(rp < base - 1) rp += 2 * gaps[j]; } mpz_clear(exponent); return trans; } int stage2_init_param4(int num, int cur_rp, int base, int e, int n, uint8 *gaps, int trans, float *err) { int rp = 1, j = 0, i, k = 1; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } trans = stage2_init_param1(rp, 1, e, n, trans, err); hipLaunchKernelGGL(( copy_kernel), dim3(n / (2*threads1)), dim3(threads1), 0, 0, &rp_data[0], &e_data[0]); k = rp + 2; for(i = 1; i < num; i++) { j++; rp += 2 * gaps[j]; while(k <= rp) { trans = next_base1(k, e, n, trans, err); cutilSafeThreadSync(); k += 2; } hipLaunchKernelGGL(( copy_kernel), dim3(n / (2*threads1)), dim3(threads1), 0, 0, &rp_data[i * n], &e_data[0]); } return trans; } int rp_init_count1(int k, int base, int e, int n) { int i, j, trans = 0; int numb[6] = {10,38,102,196,346,534}; int numb1[11] = {2,8,18,32,50,72,96,120,144,168,192}; mpz_t exponent; mpz_init(exponent); mpz_ui_pow_ui (exponent, base, e); trans += (int) 
mpz_sizeinbase (exponent, 2) + (int) mpz_popcount(exponent) - 2; mpz_clear(exponent); if(k < 2 * e) { trans = 2 * trans + 1; trans += numb[e / 2 - 1] + numb1[k/2-1]; return(trans); } else { mpz_t *exponents; exponents = (mpz_t *) malloc((e+1) * sizeof(mpz_t)); for(j = 0; j <= e; j++) mpz_init(exponents[j]); for(j = e; j >= 0; j--) mpz_ui_pow_ui (exponents[j], (k - j * 2), e); for(j = 0; j < e; j++) for(i = e; i > j; i--) mpz_sub(exponents[i], exponents[i-1], exponents[i]); for(j = 0; j <= e; j++) { trans += (int) mpz_sizeinbase (exponents[j], 2) + (int) mpz_popcount(exponents[j]) - 2; } for(j = 0; j <= e; j++) mpz_clear(exponents[j]); return 2 * (trans + e + 2) - 1; } } int rp_init_count1a(int k, int base, int e, int n) { int trans; int numb[6] = {10,38,102,196,346,534}; int numb1[12] = {0,2,8,18,32,50,72,96,120,144,168,192}; trans = (int) (e * log2((double)base) * 3.0 ); if(k < 2 * e) { trans += numb[e/2-1] + numb1[(k+1)/2-1]; } else { if(e == 2) trans += (int) (9.108 * log2((double)k) + 10.7); else if(e == 4) trans += (int) (30.349 * log2((double)k) + 50.5); else if(e == 6) trans += (int) (64.560 * log2((double)k) + 137.6); else if(e == 8) trans += (int) (110.224 * log2((double)k) + 265.2); else if(e == 10) trans += (int) (168.206 * log2((double)k) + 478.6); else trans += (int) (237.888 * log2((double)k) + 731.5); } return trans; } int rp_init_count2(int num, int cur_rp, int e, int n, uint8 *gaps) { int rp = 1, j = 0, i, trans = 0; int numb[6] = {10,38,102,196,346,534}; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } if(cur_rp == 0) trans -= e * e / 2 - 1; cur_rp = rp; if(rp == 1) trans += numb[e/2-1]; else trans = rp_init_count1(rp, 1, e, n); for(i = 1; i < num; i++) { j++; rp += 2 * gaps[j]; } trans += e * (rp - cur_rp); return trans; } int rp_init_count2a(int cur_rp, int e, int n, uint8 *gaps) { int rp = 1, j = 0, trans = 0; int numb[6] = {10,38,102,196,346,534}; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } if(cur_rp == 0) trans -= e * e / 2 - 1; cur_rp = rp; if(rp == 1) trans += numb[e/2-1]; else trans = rp_init_count1a(rp, 1, e, n); return trans; } int stage2(int *x_int, unsigned *x_packed, int q, int n, int nrp, float err) { int j, i = 0, t; int e, d, b2 = g_b2; int rpt = 0, rp; int ks, ke, m = 0, k; int last = 0; uint8 *bprimes = NULL; int prime, prime_pair; uint8 *rp_gaps = NULL; int sprimes[] = {3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 43, 47, 53, 0}; uint8 two_to_i[] = {1, 2, 4, 8, 16, 32, 64, 128}; int count0 = 0, count1 = 0, count2 = 0; mpz_t control; timeval time0, time1; { int best_guess = 0x01111111; int best_d = 0, best_e = 0, best_nrp = 0; int guess; int passes; int su; int nrpe = 0; int start_e = 2, end_e = 12; int start_d = 9240, d_div = 1; if(g_e) { start_e = g_e; end_e = g_e; } if(g_d) { start_d = g_d; d_div = g_d; } for(d = start_d; d > 1;d /= d_div) { if(d >= 2310) { rpt = d / 2310 * 480; i = 4; } else if(d >= 210) { rpt = d / 210 * 48; i = 3; } else if(d >= 30) { rpt = d / 30 * 8; i = 2; } //else if(d >= 6) // { // rpt = d / 6 * 2; // i = 1; //} if(b1 * sprimes[i] * 53 < b2) ks = ((((b1 * 53 + 1) >> 1) + d - 1) / d - 1) * d; else if(b1 * sprimes[i] < b2) ks = ((((b2 / sprimes[i] + 1) >> 1) + d - 1) / d - 1) * d; else ks = ((((b1 + 1) >> 1) + d - 1) / d - 1) * d; ke = ((((b2 + 1) >> 1) + d - 1) / d) * d; ks = ((ks / d) << 1) + 1; ke = (ke / d) << 1; for(e = start_e; e <= end_e; e +=2) { nrpe = nrp - e - 1; if(nrpe <= 0) break; passes = (rpt + nrpe - 1) / nrpe; while(nrpe > 1 && passes == (rpt + nrpe - 2) / (nrpe - 1)) nrpe--; guess = rp_init_count1a(ks, d, e, n) * 
passes; for(su = 0; su < rpt; su += nrpe)guess += rp_init_count1a((su * d / rpt) | 1, 1, e, n); guess += 2 * e * (d/2 - passes) - e * e / 2; double numprimes = (double) ke*d / (log ((double) ke*d) - 1.0) - (double) b1 / (log ((double) b1) - 1.0); double numpairings = numprimes / 2.0 * numprimes / ((double) ((ke - ks)*d) * (double) rpt / d); guess += e * (ke - ks) * passes + (2.2) * (int)(numprimes-numpairings); if(e == 4) guess = (int) guess * 0.95; if(e == 6) guess = (int) guess * 0.90; if(e == 12) guess = (int) guess * 0.85; if(guess < best_guess) { best_guess = guess; best_d = d; best_e = e; best_nrp = nrpe; } } if(d>2310) d -= 2310; else if(d>210) d -= 210; else if(d>=30) d -= 30; //else if(d>=6) d -= 6; } d = best_d; e = best_e; nrp = best_nrp; } if(d == 0) exit(3); int end = (q + 31) / 32; if(x_packed[end + 10] == 0) { x_packed[end + 10] = b2; x_packed[end + 11] = d; x_packed[end + 12] = e; x_packed[end + 13] = nrp; x_packed[end + 14] = 0; // m = number of relative primes already finished x_packed[end + 15] = 0; // k = how far done with currect crop of relative primes x_packed[end + 16] = 0; // t = where to find next relativel prime in the bit array x_packed[end + 17] = 0; // extra initialization transforms from starting in the middle of a pass } else { b1 = x_packed[end + 5]; b2 = x_packed[end + 10]; d = x_packed[end + 11]; e = x_packed[end + 12]; nrp = x_packed[end + 13]; } g_e = e; printf("Using b1 = %d, b2 = %d, d = %d, e = %d, nrp = %d\n",b1, b2,d,e,nrp); if(d % 2310 == 0) { i = 4; rpt = 480 * d / 2310; } else if(d % 210 == 0) { i = 3; rpt = 48 * d / 210; } else if(d % 30 == 0) { i = 2; rpt = 8 * d / 30; } else { i = 1; rpt = 2 * d / 6; } if(b1 * sprimes[i] * 53 < b2) ks = ((((b1 * 53 + 1) >> 1) + d - 1) / d - 1) * d; else if(b1 * sprimes[i] < b2) ks = ((((b2 / sprimes[i] + 1) >> 1) + d - 1) / d - 1) * d; else ks = ((((b1 + 1) >> 1) + d - 1) / d - 1) * d; ke = ((((b2 + 1) >> 1) + d - 1) / d) * d; bprimes = (uint8*) malloc(ke * sizeof(uint8)); if(!bprimes) { printf("failed to allocate bprimes\n"); exit (1); } for (j = 0; j < ke; j++) bprimes[j] = 0; gtpr(2 * ke, bprimes); for(j = 0; j < 10; j++) bprimes[j] = 1; bprimes[0] = bprimes[4] = bprimes[7] = 0; cutilSafeCall (hipMalloc ((void **) &e_data, sizeof (double) * n * (e + 1))); cutilSafeCall (hipMalloc ((void **) &rp_data, sizeof (double) * n * nrp)); for( j = (b1 + 1) >> 1; j < ks; j++) { if(bprimes[j] == 1) { m = i; last = j; while(sprimes[m]) { prime = sprimes[m] * j + (sprimes[m] >> 1); m++; if(prime < ks) continue; if(prime > ke) break; prime_pair = prime + d - 1 - ((prime % d) << 1); bprimes[last] = 0; bprimes[prime] = 1; if(bprimes[prime_pair]) break; last = prime; } } } rp_gaps = (uint8*) malloc(rpt * sizeof(uint8)); if(!rp_gaps) { printf("failed to allocate rp_gaps\n"); exit (1); } j = 0; k = 0; for(rp = 1; rp < d; rp += 2) { k++; for (m = 0; m < i; m++) if((rp % sprimes[m]) == 0) break; if(m == i) { rp_gaps[j] = k; j++; k = 0; } } k = ks + (d >> 1); m = k - 1; j = 0; rp = 0; uint8 *tprimes = (uint8*) malloc(rpt / 8 * sizeof(uint8)); int l = 0; while(m < ke) { tprimes[l] = 0; for(i = 0; i < 8; i++) { m += rp_gaps[j]; k -= rp_gaps[j]; if (bprimes[m] || bprimes[k]) { tprimes[l] |= two_to_i[i]; count1++; } else count0++; if (bprimes[m] && bprimes[k]) count2++; j++; if(j == rpt) { j = 0; m += (d >> 1); k = m + 1; } } l++; if(l * 8 == rpt) { for(t = 0; t < (rpt >> 3); t++) bprimes[rp + t] = tprimes[t]; l = 0; rp += rpt >> 3; } } free(tprimes); printf("Zeros: %d, Ones: %d, Pairs: %d\n", count0, count1, count2); 
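/* At this point bprimes[] has been repacked into a bit map: bit t is set when
   at least one member of the prime pair (m, k) examined above for relative
   prime t in its D-block is prime, so stage 2 needs a multiplication there;
   pairs counted in count2 (both members prime) still cost only one multiply.
   The packed bytes are imported below into the GMP integer `control`, which
   the main stage 2 loop queries with mpz_tstbit(control, t) before calling
   E_sub_mul. */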
mpz_init(control); mpz_import(control, (ke - ks) / d * rpt / sizeof(bprimes[0]) , -1, sizeof(bprimes[0]), 0, 0, bprimes); free(bprimes); unpack_bits_int(x_int, x_packed, q, n); balance_digits_int(x_int, q, n); hipMemcpy (&g_xint[n], x_int, sizeof (int) * n , hipMemcpyHostToDevice); int fp = 1; int num_tran = 0, temp_tran; int tran_save; int itran_tot; int ptran_tot; int itran_done = 0; int ptran_done = 0; double checkpoint_int, checkpoint_bnd; double time, ptime = 0.0, itime = 0.0; ks = ((ks / d) << 1) + 1; ke = (ke / d) << 1; m = x_packed[end + 14]; k = x_packed[end + 15]; t = x_packed[end + 16]; if(m + k > 0) // some stage 2 has already been done { itran_done = x_packed[end + 18] + x_packed[end + 17]; ptran_done = x_packed[end + 19]; itime = x_packed[end + 20]; ptime = x_packed[end + 21]; } ptran_tot = (ke - ks - 1) * e * ((rpt + nrp - 1) / nrp) + count1 * 2; int passes; passes = (rpt + nrp - 1) / nrp; itran_tot = rp_init_count1(ks, d, e, n) * passes + x_packed[end + 17]; int su = 0; while (su < rpt) { if(rpt - su > nrp) { itran_tot += rp_init_count2(nrp, su, e, n, rp_gaps); } else { itran_tot += rp_init_count2(rpt - su, su, e, n, rp_gaps); } su += nrp; } if (k == 0) k = ks; if(nrp > rpt - m) nrp = rpt - m; gettimeofday (&time0, NULL); do { printf("Processing %d - %d of %d relative primes.\n", m + 1, m + nrp, rpt); printf("Inititalizing pass... "); hipLaunchKernelGGL(( apply_weights) , dim3(n / (2 * threads1)), dim3(threads1), 0, 0, g_x, &g_xint[0], g_ttmp); E_pre_mul(g_x, g_x, n, 1); num_tran = stage2_init_param4(nrp, m, d, e, n, rp_gaps, num_tran, &err); temp_tran = num_tran; num_tran = stage2_init_param1(k, d, e, n, num_tran, &err); hipLaunchKernelGGL(( apply_weights) , dim3(n / (2 * threads1)), dim3(threads1), 0, 0, g_x, &g_xint[n], g_ttmp); temp_tran = num_tran - temp_tran; itran_done += num_tran; if((m > 0 || k > ks) && fp) { x_packed[end + 17] += num_tran; itran_tot += num_tran; } fp = 0; cutilSafeCall (hipMemcpy (&err, g_err, sizeof (float), hipMemcpyDeviceToHost)); gettimeofday (&time1, NULL); time = 1000000.0 * (double)(time1.tv_sec - time0.tv_sec) + time1.tv_usec - time0.tv_usec; itime += time / 1000000.0; if(!quitting) { printf("done. 
transforms: %d, err = %0.5f, (%0.2f real, %0.4f ms/tran, ETA ", num_tran, err, time / 1000000.0, time / (float) (num_tran * 1000)); if(m == 0 && k == ks) printf("NA"); else print_time_from_seconds((int) (itime * ((double) itran_tot/itran_done - 1) + ptime * ((double) ptran_tot / ptran_done - 1))); } printf(")\n"); time0.tv_sec = time1.tv_sec; time0.tv_usec = time1.tv_usec; num_tran = 0; tran_save = 0; checkpoint_int = (ke - ks) / 2 * e + count1 * nrp / (double) rpt; int chkp_per_pass; chkp_per_pass = RINT_x86(checkpoint_int / checkpoint_iter); if(chkp_per_pass == 0) chkp_per_pass = 1; int next_checkpoint = ke - 1; checkpoint_int = (ke - ks + 1) / (double) chkp_per_pass; checkpoint_bnd = ks - 2.0; while((int) checkpoint_bnd < k) checkpoint_bnd += checkpoint_int; next_checkpoint = RINT_x86(checkpoint_bnd); next_checkpoint |= 1; for( ; k < ke && !quitting; k += 2) { int t_last = -1; { i = nrp - 1; while(i && !mpz_tstbit (control, t + i)) i--; if(i) t_last = t + i; } for(j = 0; j < nrp; j++) { if(mpz_tstbit (control, t)) { E_sub_mul(g_x, g_x, &e_data[0], &rp_data[j * n], n, err, t == t_last); num_tran += 2; if(num_tran % 200 == 0) { cutilSafeCall (hipMemcpy (&err, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(err > 0.4) quitting = 2; } else if(polite_f && num_tran % (2 * polite) == 0) cutilSafeThreadSync(); } t++; } _kbhit(); t += rpt - nrp; if(!quitting) { if(k < ke - 1) num_tran = next_base1(k, e, n, num_tran, &err); if(num_tran % 200 < 2 * e) { cutilSafeCall (hipMemcpy (&err, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(err > 0.4) quitting = 2; } else if(polite_f && num_tran % (2 * polite) < 2 * e) cutilSafeThreadSync(); } if(k == next_checkpoint || quitting == 1) { checkpoint_bnd += checkpoint_int; next_checkpoint = RINT_x86(checkpoint_bnd); next_checkpoint |= 1; if(quitting == 1) cutilSafeCall (hipMemcpy (&err, g_err, sizeof (float), hipMemcpyDeviceToHost)); if(err <= 0.4f) { cutilSafeCall (hipMemcpy (x_int, &g_xint[n], sizeof (int) * n, hipMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); pack_bits_int(x_int, x_packed, q, n); x_packed[end + 13] = nrp; if(k < ke - 1) { x_packed[end + 14] = m; x_packed[end + 15] = k + 2; x_packed[end + 16] = t; } else { x_packed[end + 14] = m + nrp; x_packed[end + 15] = ks; x_packed[end + 16] = m + nrp; } gettimeofday (&time1, NULL); time = 1000000.0 * (double)(time1.tv_sec - time0.tv_sec) + time1.tv_usec - time0.tv_usec; ptime += time / 1000000.0; x_packed[end + 18] = itran_done; x_packed[end + 19] = ptran_done + num_tran; x_packed[end + 20] = itime; x_packed[end + 21] = ptime; write_st2_checkpoint(x_packed, q); printf ("Transforms: %5d ", num_tran - tran_save); printbits_int (x_int, q, n, 0, 0, NULL, 0); printf (" err = %5.5f (", err); print_time_from_seconds ((int) time1.tv_sec - time0.tv_sec); printf (" real, %4.4f ms/tran, ETA ", time / 1000.0 / (num_tran - tran_save)); print_time_from_seconds((int) itime * ((double) itran_tot/itran_done - 1) + ptime * ((double) ptran_tot / (ptran_done + num_tran) - 1)); printf(")\n"); fflush(stdout); tran_save = num_tran; time0.tv_sec = time1.tv_sec; time0.tv_usec = time1.tv_usec; reset_err(&err, 0.85f); } } } k = ks; m += nrp; t = m; if(rpt - m < nrp) nrp = rpt - m; ptran_done += num_tran; num_tran = 0; printf("\n"); } while(m < rpt && !quitting); if(quitting < 2) { if(!quitting) printf("Stage 2 complete, %d transforms, estimated total time = ", ptran_done + itran_done); else printf("Quitting, estimated time spent = "); print_time_from_seconds((int) itime + ptime); printf("\n"); } else if 
(quitting == 2) printf ("err = %5.5g >= 0.40, quitting.\n", err); free(rp_gaps); cutilSafeCall (hipFree ((char *) e_data)); cutilSafeCall (hipFree ((char *) rp_data)); mpz_clear (control); return 0; } int check_pm1 (int q, char *expectedResidue) { int n, j, last = 0; int error_flag, checkpoint_flag; int *x_int = NULL; unsigned *x_packed = NULL; float maxerr = 0.0f, terr; int restarting = 0; timeval time0, time1; int total_time = 0, start_time; int j_resume = 0; int bit; unsigned *control = NULL; int stage = 0, st1_factor = 0; size_t global_mem, free_mem, use_mem; int nrp = g_nrp; signal (SIGTERM, SetQuitting); signal (SIGINT, SetQuitting); hipMemGetInfo(&free_mem, &global_mem); #ifdef _MSC_VER printf("CUDA reports %IuM of %IuM GPU memory free.\n",free_mem/1024/1024, global_mem/1024/1024); #else printf("CUDA reports %zuM of %zuM GPU memory free.\n",free_mem/1024/1024, global_mem/1024/1024); #endif do { /* while (restarting) */ maxerr = 0.0; if(stage == 0) { if(!x_packed) x_packed = read_checkpoint_packed(q); x_int = init_lucas_packed_int (x_packed, q, &n, &j, &stage, &total_time); } if(!x_int) exit (2); if(stage == 2) { if(read_st2_checkpoint(q, x_packed)) { printf("Stage 2 checkpoint found.\n"); int end = (q + 31) / 32; b1 = x_packed[end + 5]; } else printf("No stage 2 checkpoint.\n"); } g_d = g_d_commandline; if(g_nrp == 0) nrp = ((free_mem - (size_t) unused_mem * 1024 * 1024)/ n / 8 - 7); #ifdef _MSC_VER if (nrp > (4096/sizeof(double))*1024*1024/n) nrp = (4096/sizeof(double))*1024*1024/n; // Max single allocation of 4 GB on Windows? #endif if(nrp < 4) nrp = 4; use_mem = (size_t) (8 * (nrp + 7)* (size_t) n); #ifdef _MSC_VER printf("Using up to %IuM GPU memory.\n",use_mem/1024/1024); #else printf("Using up to %zuM GPU memory.\n",use_mem/1024/1024); #endif if (free_mem < use_mem) printf("WARNING: There may not be enough GPU memory for stage 2!\n"); double successrate = 0.0; if ((g_b1_commandline == 0) || (g_b2_commandline == 0)) { guess_pminus1_bounds(q, tfdepth, llsaved, nrp, &b1, &g_b2, &successrate); } if (g_b1_commandline > 0) b1 = g_b1_commandline; if (g_b2_commandline > 0) g_b2 = g_b2_commandline; if ((g_b1_commandline == 0) && (g_b2_commandline == 0)) printf("Selected B1=%d, B2=%d, %0.3g%% chance of finding a factor\n",b1,g_b2,successrate*100); if(x_packed[(q + 31)/32 + 5] == 0 || restarting) x_packed[(q + 31)/32 + 5] = b1; else { b1 = x_packed[(q + 31)/32 + 5]; printf("Using B1 = %d from savefile.\n", b1); fflush(stdout); } if (g_b2 > 1000000000) printf("WARNING: Expected failure with B2 > 1000000000!\n"); //max B2 supported? 
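/* Sizing example (illustrative numbers only, not measured): each relative
   prime buffer in rp_data is one length-n double signal, i.e. 8*n bytes, and
   7 extra signals are reserved for the fixed working arrays, matching the
   formula nrp = (free_mem - unused_mem) / (8*n) - 7 below. With a
   hypothetical n = 4194304 (4M) and 2048 MB free minus the default 100 MB
   UnusedMem reserve, each buffer is 32 MB and nrp comes out near
   (1948 MB / 32 MB) - 7, i.e. roughly 53 relative primes per pass. */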
fflush(stdout); if(stage == 1) { if(control) free(control); control = get_control(&last, b1, 0, q); } gettimeofday (&time0, NULL); start_time = time0.tv_sec; restarting = 0; if(j == 1) { printf ("Starting stage 1 P-1, M%d, B1 = %d, B2 = %d, fft length = %dK\n", q, b1, g_b2, n/1024); printf ("Doing %d iterations\n", last); //restarting = round_off_test(q, n, &j, control, last); //if(restarting) stage = 0; } else { if(stage == 1) { printf ("Continuing stage 1 from a partial result of M%d fft length = %dK, iteration = %d\n", q, n/1024, j); j_resume = j % checkpoint_iter - 1; } else { printf ("Continuing stage 2 from a partial result of M%d fft length = %dK\n", q, n/1024); } } fflush (stdout); for (; !restarting && j <= last; j++) // Main LL loop { if ((j % 100) == 0) error_flag = 1; else error_flag = 0; if ((j % checkpoint_iter == 0) || j == last) checkpoint_flag = 1; else checkpoint_flag = error_flag; bit = get_bit(last - j, control); terr = lucas_square (q, n, j, last, &maxerr, error_flag, bit, stage, checkpoint_flag); if(quitting == 1 && !checkpoint_flag) { j++; bit = get_bit(last - j, control); terr = lucas_square (q, n, j, last, &maxerr, 1, bit, stage, 1); } if (error_flag || quitting == 1) { if (terr >= 0.40) { printf ("Iteration = %d, err = %5.5g >= 0.40, quitting.\n", j, terr); quitting = 2; } } if ((j % checkpoint_iter) == 0 || quitting) { if(quitting < 2) { cutilSafeCall (hipMemcpy (x_int, g_xint, sizeof (int) * n, hipMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); gettimeofday (&time1, NULL); total_time += (time1.tv_sec - start_time); start_time = time1.tv_sec; set_checkpoint_data(x_packed, q, n, j + 1, stage, total_time); pack_bits_int(x_int, x_packed, q, n); write_checkpoint_packed (x_packed, q); } if(quitting == 0) { printf ("Iteration %d ", j); printbits_int (x_int, q, n, 0, 0, NULL, 0); long long diff = time1.tv_sec - time0.tv_sec; long long diff1 = 1000000 * diff + time1.tv_usec - time0.tv_usec; long long diff2 = (last - j) * diff1 / ((checkpoint_iter - j_resume) * 1e6); gettimeofday (&time0, NULL); printf (" err = %5.5f (", maxerr); print_time_from_seconds ((int) diff); printf (" real, %4.4f ms/iter, ETA ", diff1 / 1000.0 / (checkpoint_iter - j_resume)); print_time_from_seconds ((int) diff2); printf (")\n"); fflush (stdout); if(j_resume) j_resume = 0; reset_err(&maxerr, 0.85); // Instead of tracking maxerr over whole run, reset it at each checkpoint. 
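/* ETA arithmetic used above: diff1 is the wall time of the last checkpoint
   interval in microseconds, so diff1 / 1000 / (checkpoint_iter - j_resume)
   is the ms/iter figure printed, and diff2 scales it by the (last - j)
   iterations remaining. Illustrative example: a 50 s interval with
   checkpoint_iter = 10000 gives 5 ms/iter, so 1,000,000 remaining
   iterations yield an ETA near 5000 s. */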
} else { printf("Estimated time spent so far: "); print_time_from_seconds(total_time); printf("\n\n"); j = last + 1; } } if ( k_f && !quitting && (!(j & 15)) && _kbhit()) interact(); // abstracted to clean up check() fflush (stdout); } if (!restarting && !quitting) { // done with stage 1 if(stage == 1) { free ((char *) control); gettimeofday (&time1, NULL); cutilSafeCall (hipMemcpy (x_int, g_xint, sizeof (int) * n, hipMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); if(g_eb1 > b1) stage = 3; else if(g_b2 > b1) stage = 2; set_checkpoint_data(x_packed, q, n, j + 1, stage, total_time); pack_bits_int(x_int, x_packed, q, n); write_checkpoint_packed (x_packed, q); printbits_int (x_int, q, n, 0, NULL , 0, 1); total_time += (time1.tv_sec - start_time); printf ("\nStage 1 complete, estimated total time = "); print_time_from_seconds(total_time); fflush (stdout); printf("\nStarting stage 1 gcd.\n"); st1_factor = get_gcd(/*x,*/ x_packed, q, n, 1); } if(!st1_factor) { if (stage == 3) { printf("Here's where we put the b1 extension calls\n"); stage = 2; } if(stage == 2) { printf("Starting stage 2.\n"); stage2(x_int, x_packed, q, n, nrp, maxerr); if(!quitting) { printf("Starting stage 2 gcd.\n"); get_gcd(x_packed, q, n, 2); rm_checkpoint(q, keep_s1); } } } printf("\n"); } close_lucas (x_int); } while (restarting); free ((char *) x_packed); return (0); } void parse_args(int argc, char *argv[], int* q, int* device_numer, int* cufftbench_s, int* cufftbench_e, int* cufftbench_d); /* The rest of the opts are global */ int main (int argc, char *argv[]) { printf("%s\n",program); quitting = 0; #define THREADS_DFLT 256 #define CHECKPOINT_ITER_DFLT 10000 #define SAVE_FOLDER_DFLT "savefiles" #define S_F_DFLT 0 #define T_F_DFLT 0 #define K_F_DFLT 0 #define D_F_DFLT 0 #define POLITE_DFLT 1 #define UNMEM_DFLT 100; #define WORKFILE_DFLT "worktodo.txt" #define RESULTSFILE_DFLT "results.txt" /* "Production" opts to be read in from command line or ini file */ int q = -1; int device_number = -1, f_f = 0; checkpoint_iter = -1; threads1 = -1; fftlen = -1; unused_mem = -1; s_f = t_f = d_f = k_f = -1; polite_f = polite = -1; AID[0] = input_filename[0] = RESULTSFILE[0] = 0; /* First character is null terminator */ char fft_str[132] = "\0"; /* Non-"production" opts */ r_f = 0; int cufftbench_s, cufftbench_e, cufftbench_d; cufftbench_s = cufftbench_e = cufftbench_d = 0; parse_args(argc, argv, &q, &device_number, &cufftbench_s, &cufftbench_e, &cufftbench_d); /* The rest of the args are globals */ if (file_exists(INIFILE)) { if( checkpoint_iter < 1 && !IniGetInt(INIFILE, "CheckpointIterations", &checkpoint_iter, CHECKPOINT_ITER_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option CheckpointIterations; using default: %d\n", CHECKPOINT_ITER_DFLT)*/; if( threads1 < 1 && !IniGetInt(INIFILE, "Threads", &threads1, THREADS_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option Threads; using default: %d\n", THREADS_DFLT); if( s_f < 0 && !IniGetInt(INIFILE, "SaveAllCheckpoints", &s_f, S_F_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option SaveAllCheckpoints; using default: off\n")*/; if( s_f > 0 && !IniGetStr(INIFILE, "SaveFolder", folder, SAVE_FOLDER_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option SaveFolder; using default: \"%s\"\n", SAVE_FOLDER_DFLT)*/; if( t_f < 0 && !IniGetInt(INIFILE, "CheckRoundoffAllIterations", &t_f, 0) ) fprintf(stderr, "Warning: Couldn't parse ini file option CheckRoundoffAllIterations; using default: off\n"); if(!IniGetInt(INIFILE, 
"KeepStage1SaveFile", &keep_s1, 0) ) keep_s1 = 0; if( polite < 0 && !IniGetInt(INIFILE, "Polite", &polite, POLITE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option Polite; using default: %d\n", POLITE_DFLT); if( k_f < 0 && !IniGetInt(INIFILE, "Interactive", &k_f, 0) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option Interactive; using default: off\n")*/; if( device_number < 0 && !IniGetInt(INIFILE, "DeviceNumber", &device_number, 0) ) fprintf(stderr, "Warning: Couldn't parse ini file option DeviceNumber; using default: 0\n"); if( d_f < 0 && !IniGetInt(INIFILE, "PrintDeviceInfo", &d_f, D_F_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option PrintDeviceInfo; using default: off\n")*/; if( !input_filename[0] && !IniGetStr(INIFILE, "WorkFile", input_filename, WORKFILE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option WorkFile; using default \"%s\"\n", WORKFILE_DFLT); /* I've readded the warnings about worktodo and results due to the multiple-instances-in-one-dir feature. */ if( !RESULTSFILE[0] && !IniGetStr(INIFILE, "ResultsFile", RESULTSFILE, RESULTSFILE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option ResultsFile; using default \"%s\"\n", RESULTSFILE_DFLT); if( fftlen < 0 && !IniGetStr(INIFILE, "FFTLength", fft_str, "\0") ) /*fprintf(stderr, "Warning: Couldn't parse ini file option FFTLength; using autoselect.\n")*/; if( unused_mem < 0 && !IniGetInt(INIFILE, "UnusedMem", &unused_mem, 100) ) printf("Warning: Couldn't parse ini file option UnusedMem; using default.\n"); } else // no ini file { fprintf(stderr, "Warning: Couldn't find .ini file. Using defaults for non-specified options.\n"); if( checkpoint_iter < 1 ) checkpoint_iter = CHECKPOINT_ITER_DFLT; if( threads1 < 1 ) threads1 = THREADS_DFLT; if( fftlen < 0 ) fftlen = 0; if( s_f < 0 ) s_f = S_F_DFLT; if( t_f < 0 ) t_f = T_F_DFLT; if( k_f < 0 ) k_f = K_F_DFLT; if( device_number < 0 ) device_number = 0; if( d_f < 0 ) d_f = D_F_DFLT; if( polite < 0 ) polite = POLITE_DFLT; if( unused_mem < 0 ) unused_mem = UNMEM_DFLT; if( !input_filename[0] ) sprintf(input_filename, WORKFILE_DFLT); if( !RESULTSFILE[0] ) sprintf(RESULTSFILE, RESULTSFILE_DFLT); } if( fftlen < 0 ) { // possible if -f not on command line fftlen = fft_from_str(fft_str); } if (polite == 0) { polite_f = 0; polite = 1; } else { polite_f = 1; } if (threads1 != 32 && threads1 != 64 && threads1 != 128 && threads1 != 256 && threads1 != 512 && threads1 != 1024) { fprintf(stderr, "Error: thread count is invalid.\n"); fprintf(stderr, "Threads must be 2^k, 5 <= k <= 10.\n\n"); exit(2); } f_f = fftlen; // if the user has given an override... 
then note this length must be kept between tests init_device (device_number); fft_count = init_ffts(); if (cufftbench_d) cufftbench (cufftbench_s, cufftbench_e, cufftbench_d, device_number); else { if (s_f) { #ifndef _MSC_VER mode_t mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; if (mkdir (folder, mode) != 0) fprintf (stderr, "mkdir: cannot create directory `%s': File exists\n", folder); #else if (_mkdir (folder) != 0) fprintf (stderr, "mkdir: cannot create directory `%s': File exists\n", folder); #endif } if (q <= 0) { int error; #ifdef EBUG printf("Processed INI file and console arguments correctly; about to call get_next_assignment().\n"); #endif do { // while(!quitting) fftlen = f_f; // fftlen and AID change between tests, so be sure to reset them AID[0] = 0; error = get_next_assignment(input_filename, &q, &fftlen, &tfdepth, &llsaved, &AID); /* Guaranteed to write to fftlen ONLY if specified on workfile line, so that if unspecified, the pre-set default is kept. */ if( error > 0) exit (2); // get_next_assignment prints warning message #ifdef EBUG printf("Gotten assignment, about to call check().\n"); #endif check_pm1 (q, 0); if(!quitting) // Only clear assignment if not killed by user, i.e. test finished { error = clear_assignment(input_filename, q); if(error) exit (2); // prints its own warnings } } while(!quitting); } else // Exponent passed in as argument { if (!valid_assignment(q, fftlen)) {printf("\n");} //! v_a prints warning else { //int trft = 0; //while(!trft) { check_pm1 (q, 0); //q += 2; //while(!isprime(q)) q += 2; } } } } // end if(-r) else if(-cufft) else(workfile) } // end main() void parse_args(int argc, char *argv[], int* q, int* device_number, int* cufftbench_s, int* cufftbench_e, int* cufftbench_d) { while (argc > 1) { if (strcmp (argv[1], "-t") == 0) { t_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-h") == 0) { fprintf (stderr, "$ CUDAPm1 -h|-v\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] [-i inifile] [-threads 32|64|128|256|512|1024] [-c checkpoint_iteration] [-f fft_length] [-s folder] [-t] [-polite iteration] [-k] exponent|input_filename\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] [-i inifile] [-threads 32|64|128|256|512|1024] [-polite iteration] -r\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] -cufftbench start end distance\n\n"); fprintf (stderr, " -h print this help message\n"); fprintf (stderr, " -v print version number\n"); fprintf (stderr, " -info print device information\n"); fprintf (stderr, " -i set .ini file name (default = \"CUDAPm1.ini\")\n"); fprintf (stderr, " -threads set threads number (default = 256)\n"); fprintf (stderr, " -f set fft length (if round off error then exit)\n"); fprintf (stderr, " -s save all checkpoint files\n"); fprintf (stderr, " -t check round off error all iterations\n"); fprintf (stderr, " -polite GPU is polite every n iterations (default -polite 1) (-polite 0 = GPU aggressive)\n"); fprintf (stderr, " -cufftbench exec CUFFT benchmark (Ex. 
$ ./CUDAPm1 -d 1 -cufftbench 1179648 6291456 32768 )\n"); fprintf (stderr, " -r exec residue test.\n"); fprintf (stderr, " -k enable keys (p change -polite, t disable -t, s change -s)\n\n"); fprintf (stderr, " -b2 set b2\n\n"); fprintf (stderr, " -d2 Brent-Suyama coefficient (multiple of 30, 210, or 2310) \n\n"); fprintf (stderr, " -e2 Brent-Suyama exponent (2-12) \n\n"); //fprintf (stderr, // Now an internal parameter // " -nrp2 Relative primes per pass (divisor of 8, 48, or 480)\n\n"); exit (2); } else if (strcmp (argv[1], "-v") == 0) { printf("%s\n\n", program); exit (2); } else if (strcmp (argv[1], "-polite") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -polite option\n\n"); exit (2); } polite = atoi (argv[2]); if (polite == 0) { polite_f = 0; polite = 1; } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-r") == 0) { r_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-k") == 0) { k_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-d") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -d option\n\n"); exit (2); } *device_number = atoi (argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-i") == 0) { if(argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -i option\n\n"); exit (2); } sprintf (INIFILE, "%s", argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-info") == 0) { d_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-cufftbench") == 0) { if (argc < 5 || argv[2][0] == '-' || argv[3][0] == '-' || argv[4][0] == '-') { fprintf (stderr, "can't parse -cufftbench option\n\n"); exit (2); } *cufftbench_s = atoi (argv[2]); *cufftbench_e = atoi (argv[3]); *cufftbench_d = atoi (argv[4]); argv += 4; argc -= 4; } else if (strcmp (argv[1], "-threads") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -threads option\n\n"); exit (2); } threads1 = atoi (argv[2]); if (threads1 != 32 && threads1 != 64 && threads1 != 128 && threads1 != 256 && threads1 != 512 && threads1 != 1024) { fprintf(stderr, "Error: thread count is invalid.\n"); fprintf(stderr, "Threads must be 2^k, 5 <= k <= 10.\n\n"); exit (2); } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-c") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -c option\n\n"); exit (2); } checkpoint_iter = atoi (argv[2]); if (checkpoint_iter == 0) { fprintf (stderr, "can't parse -c option\n\n"); exit (2); } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-f") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -f option\n\n"); exit (2); } fftlen = fft_from_str(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-b1") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -b1 option\n\n"); exit (2); } g_b1_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-e2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -e2 option\n\n"); exit (2); } g_e = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-d2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -d2 option\n\n"); exit (2); } g_d_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-b2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -b2 option\n\n"); exit (2); } g_b2_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-nrp2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -nrp option\n\n"); exit (2); 
} g_nrp = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-s") == 0) { s_f = 1; if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -s option\n\n"); exit (2); } sprintf (folder, "%s", argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-eb1") == 0) { s_f = 1; if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -eb1 option\n\n"); exit (2); } g_eb1 = atoi(argv[2]); argv += 2; argc -= 2; } else { if (*q != -1 || strcmp (input_filename, "") != 0 ) { fprintf (stderr, "can't parse options\n\n"); exit (2); } int derp = atoi (argv[1]); if (derp == 0) { sprintf (input_filename, "%s", argv[1]); } else { *q = derp; *q |= 1; while(!isprime(*q)) *q += 2; } argv++; argc--; } } if (g_d_commandline%30 != 0) { printf("-d2 must be a multiple of 30, 210, or 2310.\n"); exit(3); } if ((g_e%2 != 0) || (g_e < 0) || (g_e > 12)) { printf("-e2 must be 2, 4, 6, 8, 10, or 12.\n"); exit(3); } } int interact(void) { int c = getchar (); if (c == 'p') if (polite_f) { polite_f = 0; printf (" -polite 0\n"); } else { polite_f = 1; printf (" -polite %d\n", polite); } else if (c == 't') { t_f = 0; printf (" disabling -t\n"); } else if (c == 's') if (s_f == 1) { s_f = 2; printf (" disabling -s\n"); } else if (s_f == 2) { s_f = 1; printf (" enabling -s\n"); } if (c == 'F') { printf(" -- Increasing fft length.\n"); fftlen++; return 1; } if (c == 'f') { printf(" -- Decreasing fft length.\n"); fftlen--; return 1; } if (c == 'k') { printf(" -- fft length reset cancelled.\n"); return 2; } fflush (stdin); return 0; }
803c70570b438a4bde577f95dcb66c2944068692.cu
char program[] = "CUDAPm1 v0.20"; /* CUDALucas.c Shoichiro Yamada Oct. 2010 This is an adaptation of Richard Crandall lucdwt.c, John Sweeney MacLucasUNIX.c, and Guillermo Ballester Valor MacLucasFFTW.c code. Improvement From Prime95. It also contains mfaktc code by Oliver Weihe and Eric Christenson adapted for CUDALucas use. Such code is under the GPL, and is noted as such. */ /* Include Files */ #include <stdlib.h> #include <stdio.h> #include <gmp.h> #include <math.h> #include <assert.h> #include <time.h> #include <signal.h> #include <sys/types.h> #include <sys/stat.h> #ifdef _MSC_VER //#define stat _stat #define strncasecmp strnicmp // _strnicmp #include <direct.h> #endif #include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include "cuda_safecalls.h" #include "parse.h" /* In order to have the gettimeofday() function, you need these includes on Linux: #include <sys/time.h> #include <unistd.h> On Windows, you need #include <winsock2.h> and a definition for int gettimeofday (struct timeval *tv, struct timezone *) {} Both platforms are taken care of in parse.h and parse.c. */ /************************ definitions ************************************/ /* http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html base code from Takuya OOURA. */ /* global variables needed */ double *g_ttmp, *g_ttp1; double *g_x, *g_ct; double *e_data; double *rp_data; int *g_xint; char *size; int threads1, threads2 = 128, threads3= 128; float *g_err, g_max_err = 0.0f; int *g_datai, *g_carryi; long long int *g_datal, *g_carryl; cufftHandle plan; cudaDeviceProp dev; int fft_count; int multipliers[250]; int quitting, checkpoint_iter, fftlen, tfdepth=74, llsaved=2, s_f, t_f, r_f, d_f, k_f; int unused_mem = 100; int polite, polite_f; int b1 = 0, g_b1_commandline = 0; int g_b2 = 0, g_b2_commandline = 0; int g_d = 0, g_d_commandline = 0; int g_e = 0; int g_nrp = 0; int g_eb1 = 0; int keep_s1 = 0; char folder[132]; char input_filename[132], RESULTSFILE[132]; char INIFILE[132] = "CUDAPm1.ini"; char AID[132]; // Assignment key char s_residue[32]; __constant__ double g_ttp_inc[2]; __constant__ int g_qn[2]; /************************ kernels ************************************/ # define RINT_x86(x) (floor(x+0.5)) # define RINT(x) __rintd(x) void set_ttp_inc(double *h_ttp_inc){ cudaMemcpyToSymbol(g_ttp_inc, h_ttp_inc, 2 * sizeof(double)); } void set_qn(int *h_qn){ cudaMemcpyToSymbol(g_qn, h_qn, 2 * sizeof(int)); } __global__ void square (int n, double *a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; //double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; xi = 2.0 * ajr * aji; xr = (ajr - aji) * (ajr + aji); yi = 2.0 * akr * aki; yr = (akr - aki) * (akr + aki); ajr = xr - yr; aji = xi + yi; akr = wkr * ajr + wki * aji; aki = wkr * aji - wki * ajr; a[j] = xr - akr; a[1 + j] = aki - xi; a[nminusj] = yr + akr; a[1 + nminusj] = aki - yi; } else { xr = a[0]; xi = a[1]; a[0] = xr * xr + xi * xi; a[1] = -xr * xi - xi * xr; xr = a[0 + m]; xi = a[1 + m]; a[1 + m] = -xr * xi - xi * xr; a[0 + m] = xr * xr - xi * xi; } } __global__ void square1 (int n, double *b, double *a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; 
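  /* square1 is the out-of-place variant of square(): it forms the pointwise
     square of the spectrum in a[] and writes the recombined result to b[],
     leaving a[] untouched. Unlike square(), the initial wkr/wki splitting
     step is omitted, so the input is presumably expected to be already in
     the split form produced by pre_mul(). */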
double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; new_aji = 2.0 * ajr * aji; new_ajr = (ajr - aji) * (ajr + aji); new_aki = 2.0 * akr * aki; new_akr = (akr - aki) * (akr + aki); xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; b[j] = new_ajr - yr; b[1 + j] = yi - new_aji; b[nminusj] = new_akr + yr; b[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; b[0] = xr * xr + xi * xi; b[1] = -xr * xi - xi * xr; xr = a[0 + m]; xi = a[1 + m]; b[1 + m] = -xr * xi - xi * xr; b[0 + m] = xr * xr - xi * xi; } } __global__ void mult2 (double *g_out, double *a, double *b, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; xr = b[j]; xi = b[1 + j]; yr = b[nminusj]; yi = b[1 + nminusj]; new_aji = ajr * xi + xr * aji; new_ajr = ajr * xr - aji * xi; new_aki = akr * yi + yr * aki; new_akr = akr * yr - aki * yi; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b[0]; yi = b[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b[0 + m]; yi = b[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void mult3 (double *g_out, double *a, double *b, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki, bjr, bji, bkr, bki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; bjr = b[j]; bji = b[1 + j]; bkr = b[nminusj]; bki = b[1 + nminusj]; xr = bjr - bkr; xi = bji + bki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; bjr -= yr; bji -= yi; bkr += yr; bki -= yi; new_aji = ajr * bji + bjr * aji; new_ajr = ajr * bjr - aji * bji; new_aki = akr * bki + bkr * aki; new_akr = akr * bkr - aki * bki; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b[0]; yi = b[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b[0 + m]; yi = b[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void sub_mul (double *g_out, double *a, double *b1, double *b2, double *ct, int n) { const int j2 = blockIdx.x * blockDim.x + 
threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki, bjr, bji, bkr, bki; double new_ajr, new_aji, new_akr, new_aki; const int m = n >> 1; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; bjr = b1[j] - b2[j]; bji = b1[1 + j] - b2[1 + j]; bkr = b1[nminusj] - b2[nminusj]; bki = b1[1 + nminusj] - b2[1 + nminusj]; new_aji = ajr * bji + bjr * aji; new_ajr = ajr * bjr - aji * bji; new_aki = akr * bki + bkr * aki; new_akr = akr * bkr - aki * bki; xr = new_ajr - new_akr; xi = new_aji + new_aki; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; g_out[j] = new_ajr - yr; g_out[1 + j] = yi - new_aji; g_out[nminusj] = new_akr + yr; g_out[1 + nminusj] = yi - new_aki; } else { xr = a[0]; xi = a[1]; yr = b1[0] - b2[0]; yi = b1[1] - b2[1]; g_out[0] = xr * yr + xi * yi; g_out[1] = -xr * yi - xi * yr; xr = a[0 + m]; xi = a[1 + m]; yr = b1[0 + m] - b2[0 + m]; yi = b1[1 + m] - b2[1 + m]; g_out[1 + m] = -xr * yi - xi * yr; g_out[0 + m] = xr * yr - xi * yi; } } __global__ void pre_mul (int n, double *a, double *ct) { const int j2 = blockIdx.x * blockDim.x + threadIdx.x; double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki; const int nc = n >> 2; const int j = j2 << 1; if (j2) { int nminusj = n - j; wkr = 0.5 - ct[nc - j2]; wki = ct[j2]; ajr = a[j]; aji = a[1 + j]; akr = a[nminusj]; aki = a[1 + nminusj]; xr = ajr - akr; xi = aji + aki; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; ajr -= yr; aji -= yi; akr += yr; aki -= yi; a[j] = ajr; a[1 + j] = aji; a[nminusj] = akr; a[1 + nminusj] = aki; } } __device__ static double __rintd (double z) { double y; asm ("cvt.rni.f64.f64 %0, %1;": "=d" (y):"d" (z)); return (y); } __global__ void apply_weights (double *g_out, int *g_in, double *g_ttmp) { int val[2], test = 1; double ttp_temp[2]; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; val[0] = g_in[index]; val[1] = g_in[index + 1]; ttp_temp[0] = g_ttmp[index]; ttp_temp[1] = g_ttmp[index + 1]; if(ttp_temp[0] < 0.0) test = 0; if(ttp_temp[1] < 0.0) ttp_temp[1] = -ttp_temp[1]; g_out[index + 1] = (double) val[1] * ttp_temp[1]; ttp_temp[1] *= -g_ttp_inc[test]; g_out[index] = (double) val[0] * ttp_temp[1]; } __global__ void norm1a (double *g_in, int *g_data, int *g_xint, double *g_ttmp, int *g_carry, volatile float *g_err, float maxerr, int g_err_flag) { long long int bigint[2]; int val[2], numbits[2] = {g_qn[0],g_qn[0]}, mask[2], shifted_carry; double ttp_temp; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int index1 = blockIdx.x << 1; __shared__ int carry[1024 + 1]; { double tval[2], trint[2]; float ferr[2]; tval[0] = g_ttmp[index]; ttp_temp = g_ttmp[index + 1]; trint[0] = g_in[index]; trint[1] = g_in[index + 1]; if(tval[0] < 0.0) { numbits[0]++; tval[0] = -tval[0]; } if(ttp_temp < 0.0) { numbits[1]++; ttp_temp = -ttp_temp; } tval[1] = tval[0] * g_ttp_inc[numbits[0] == g_qn[0]]; tval[0] = trint[0] * tval[0]; tval[1] = trint[1] * tval[1]; trint[0] = RINT (tval[0]); ferr[0] = tval[0] - trint[0]; ferr[0] = fabs (ferr[0]); bigint[0] = (long long int) trint[0]; trint[1] = RINT (tval[1]); ferr[1] = tval[1] - trint[1]; ferr[1] = fabs (ferr[1]); bigint[1] = (long long int) trint[1]; mask[0] = -1 << numbits[0]; mask[1] = -1 << numbits[1]; if(ferr[0] < ferr[1]) ferr[0] = ferr[1]; if (ferr[0] > maxerr) atomicMax((int*) 
g_err, __float_as_int(ferr[0])); } val[1] = ((int) bigint[1]) & ~mask[1]; carry[threadIdx.x + 1] = (int) (bigint[1] >> numbits[1]); val[0] = ((int) bigint[0]) & ~mask[0]; val[1] += (int) (bigint[0] >> numbits[0]); __syncthreads (); if (threadIdx.x) val[0] += carry[threadIdx.x]; shifted_carry = val[1] - (mask[1] >> 1); val[1] = val[1] - (shifted_carry & mask[1]); carry[threadIdx.x] = shifted_carry >> numbits[1]; shifted_carry = val[0] - (mask[0] >> 1); val[0] = val[0] - (shifted_carry & mask[0]); val[1] += shifted_carry >> numbits[0]; __syncthreads (); if (threadIdx.x == (blockDim.x - 1)) { if (blockIdx.x == gridDim.x - 1) g_carry[0] = carry[threadIdx.x + 1] + carry[threadIdx.x]; else g_carry[blockIdx.x + 1] = carry[threadIdx.x + 1] + carry[threadIdx.x]; } if (threadIdx.x) { val[0] += carry[threadIdx.x - 1]; { g_in[index + 1] = (double) val[1] * ttp_temp; ttp_temp *= -g_ttp_inc[numbits[0] == g_qn[0]]; g_in[index] = (double) val[0] * ttp_temp; } if(g_err_flag) { g_xint[index + 1] = val[1]; g_xint[index] = val[0]; } } else { g_data[index1] = val[0]; g_data[index1 + 1] = val[1]; } } __global__ void norm1b (double *g_in, long long int *g_data, int *g_xint, double *g_ttmp, long long int *g_carry, volatile float *g_err, float maxerr, int g_err_flag) { long long int bigint[2], shifted_carry; int numbits[2] = {g_qn[0],g_qn[0]}, mask[2]; double ttp_temp; const int index = (blockIdx.x * blockDim.x + threadIdx.x) << 1; const int index1 = blockIdx.x << 1; __shared__ long long int carry[1024 + 1]; { double tval[2], trint[2]; float ferr[2]; tval[0] = g_ttmp[index]; ttp_temp = g_ttmp[index + 1]; trint[0] = g_in[index]; trint[1] = g_in[index + 1]; if(tval[0] < 0.0) { numbits[0]++; tval[0] = -tval[0]; } if(ttp_temp < 0.0) { numbits[1]++; ttp_temp = -ttp_temp; } tval[1] = tval[0] * g_ttp_inc[numbits[0] == g_qn[0]]; tval[0] = trint[0] * tval[0]; tval[1] = trint[1] * tval[1]; trint[0] = RINT (tval[0]); ferr[0] = tval[0] - trint[0]; ferr[0] = fabs (ferr[0]); bigint[0] = (long long int) trint[0]; trint[1] = RINT (tval[1]); ferr[1] = tval[1] - trint[1]; ferr[1] = fabs (ferr[1]); bigint[1] = (long long int) trint[1]; mask[0] = -1 << numbits[0]; mask[1] = -1 << numbits[1]; if(ferr[0] < ferr[1]) ferr[0] = ferr[1]; if (ferr[0] > maxerr) atomicMax((int*) g_err, __float_as_int(ferr[0])); } bigint[0] *= 3; bigint[1] *= 3; carry[threadIdx.x + 1] = (bigint[1] >> numbits[1]); bigint[1] = bigint[1] & ~mask[1]; bigint[1] += bigint[0] >> numbits[0]; bigint[0] = bigint[0] & ~mask[0]; __syncthreads (); if (threadIdx.x) bigint[0] += carry[threadIdx.x]; shifted_carry = bigint[1] - (mask[1] >> 1); bigint[1] = bigint[1] - (shifted_carry & mask[1]); carry[threadIdx.x] = shifted_carry >> numbits[1]; shifted_carry = bigint[0] - (mask[0] >> 1); bigint[0] = bigint[0] - (shifted_carry & mask[0]); bigint[1] += shifted_carry >> numbits[0]; __syncthreads (); if (threadIdx.x == (blockDim.x - 1)) { if (blockIdx.x == gridDim.x - 1) g_carry[0] = carry[threadIdx.x + 1] + carry[threadIdx.x]; else g_carry[blockIdx.x + 1] = carry[threadIdx.x + 1] + carry[threadIdx.x]; } if (threadIdx.x) { bigint[0] += carry[threadIdx.x - 1]; { g_in[index + 1] = (double) bigint[1] * ttp_temp; ttp_temp *= -g_ttp_inc[numbits[0] == g_qn[0]]; g_in[index] = (double) bigint[0] * ttp_temp; } if(g_err_flag) { g_xint[index + 1] = bigint[1]; g_xint[index] = bigint[0]; } } else { g_data[index1] = bigint[0]; g_data[index1 + 1] = bigint[1]; } } __global__ void norm2a (double *g_x, int *g_xint, int g_N, int threads1, int *g_data, int *g_carry, double *g_ttp1, int g_err_flag) { 
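  /* Second pass of the IBDWT carry propagation: norm1a resolved carries
     inside each threads1-sized block, saved each block's first two words in
     g_data[], and stored the block's outgoing carry in g_carry[] (the last
     block's carry wraps around to word 0, as required for arithmetic
     mod 2^q - 1). Here one thread per such block adds the incoming carry to
     those first two words, re-applies the weights from g_ttp1, and writes
     the balanced doubles back into g_x (and the integers into g_xint when a
     checkpoint is requested via g_err_flag). */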
const int threadID = blockIdx.x * blockDim.x + threadIdx.x; const int threadID1 = threadID << 1; const int j = (threads1 * threadID) << 1; int temp0, temp1; int mask, shifted_carry, numbits= g_qn[0]; double temp; if (j < g_N) { temp0 = g_data[threadID1] + g_carry[threadID]; temp1 = g_data[threadID1 + 1]; temp = g_ttp1[threadID]; if(temp < 0.0) { numbits++; temp = -temp; } mask = -1 << numbits; shifted_carry = temp0 - (mask >> 1) ; temp0 = temp0 - (shifted_carry & mask); temp1 += (shifted_carry >> numbits); { g_x[j + 1] = temp1 * temp; temp *= -g_ttp_inc[numbits == g_qn[0]]; g_x[j] = temp0 * temp; } if(g_err_flag) { g_xint[j + 1] = temp1; g_xint[j] = temp0; } } } __global__ void norm2b (double *g_x, int *g_xint, int g_N, int threads1, long long int *g_data, long long int *g_carry, double *g_ttp1, int g_err_flag) { const int threadID = blockIdx.x * blockDim.x + threadIdx.x; const int threadID1 = threadID << 1; const int j = (threads1 * threadID) << 1; long long int shifted_carry, temp0, temp1; int mask, numbits = g_qn[0]; double temp; if (j < g_N) { temp0 = g_data[threadID1] + g_carry[threadID]; temp1 = g_data[threadID1 + 1]; temp = g_ttp1[threadID]; if(temp < 0.0) { numbits++; temp = -temp; } mask = -1 << numbits; shifted_carry = temp0 - (mask >> 1) ; temp0 = temp0 - (shifted_carry & mask); temp1 = temp1 + (shifted_carry >> numbits); g_x[j + 1] = temp1 * temp; temp *= -g_ttp_inc[numbits == g_qn[0]]; g_x[j] = temp0 * temp; if(g_err_flag) { g_xint[j + 1] = temp1; g_xint[j] = temp0; } } } __global__ void copy_kernel (double *save, double *y) { const int threadID = (blockIdx.x * blockDim.x + threadIdx.x) << 1; save[threadID] = y[threadID]; save[threadID + 1] = y[threadID + 1]; } /**************************************************************************** * Erato * ***************************************************************************/ //Many thanks to Ben Buhrow. typedef unsigned char u8; typedef unsigned int uint32; typedef unsigned char uint8; typedef unsigned short uint16; typedef long long unsigned int uint64; const int threadsPerBlock = 256; const uint32 block_size = 8192; const int startprime = 8; __constant__ uint32 _step5[5] = {2418280706,604570176,151142544,37785636,1083188233}; __constant__ uint32 _step7[7] = {1107363844,69210240,2151809288,134488080, 276840961,17302560,537952322}; __constant__ uint32 _step11[11] = {33816584,1073774848,135266336,132096,541065345, 528384,2164261380,2113536,67110928,8454146,268443712}; __constant__ uint32 _step13[13] = {1075838992,16809984,262656,536875016,8388672, 67239937,1050624,2147500064,33554688,268959748,4202496, 65664,134218754}; __constant__ uint32 _step17[17] = {268435488,1073741952,512,2049,8196,32784,131136, 524544,2098176,8392704,33570816,134283264,537133056, 2148532224,4194304,16777218,67108872}; __constant__ uint32 _step19[19] = {2147483712,4096,262176,16779264,1073872896,8388608, 536870928,1024,65544,4194816,268468224,2097152,134217732, 256,16386,1048704,67117056,524288,33554433}; __global__ static void SegSieve(uint32 *primes, int maxp, int nump, uint32 N, uint8 *results) { /* expect as input a set of primes to sieve with, how many of those primes there are (maxp) how many primes each thread will be responsible for (nump), and the maximum index that we need to worry about for the requested sieve interval. Also, an array into which we can put this block's count of primes. This routine implements a segmented sieve using a wheel mod 6. Each thread block on the gpu sieves a different segment of the number line. 
Each thread within each block simultaneously sieves a small set of primes, marking composites within shared memory. There is no memory contention between threads because the marking process is write only. Because each thread block starts at a different part of the number line, a small amount of computation must be done for each prime prior to sieving to figure out where to start. After sieving is done, each thread counts primes in part of the shared memory space; the final count is returned in the provided array for each block. The host cpu will do the final sum over blocks. Note, it would not be much more difficult to compute and return the primes in the block instead of just the count, but it would be slower due to the extra memory transfer required. */ uint32 i,j,k; uint32 maxID = (N + 1) / 3; uint32 bid = blockIdx.y * gridDim.x + blockIdx.x; uint32 range = block_size / threadsPerBlock; __shared__ uint8 locsieve[block_size]; __shared__ uint32 bitsieve[block_size / 32]; // everyone init the array. if ((bid+1)*block_size > maxID) { for (j=threadIdx.x * range, k=0; k<range; k++) { // we're counting hits in the kernel as well, so clear the bytes representing primes > N if ((bid * block_size + j + k) < maxID) locsieve[j+k] = 1; else locsieve[j+k] = 0; } } else { for (j=threadIdx.x * range/4, k=0; k<range/4; k++) { ((uint32 *) locsieve)[j+k] = 0x01010101; } } // the smallest primes are dealt with a bit differently. They are sieved in a separate // shared memory space in a packed bit array. constant memory holds pre-computed // information about where each prime lands within a given 32 bit region. each thread // in the block will use this info to simultaneously sieve a small portion of the // packed bit array (that way we make use of the broadcast capabilities of constant memory). // When counting or computing primes, we then have to check both the packed bit array as // well as the regular byte array, but overall it is a win to greatly speed up the // sieving of the smallest primes. 
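	// Why a single 32-bit mask per (word index mod p) suffices: in the mod-6
	// compressed number line each small prime p in {5,7,11,13,17,19} hits
	// positions with period 2p, and one sieve word covers 32 positions, so
	// the per-word hit pattern repeats every p words. _step5[] .. _step19[]
	// therefore hold exactly p precomputed masks each, selected below by
	// (word index) % p.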
// compute starting offset for prime 5: i = (bid * 256 + threadIdx.x) % 5; // then sieve prime 5 in the bit array bitsieve[threadIdx.x] = _step5[i]; // compute starting offset for prime 7: i = (bid * 256 + threadIdx.x) % 7; // then sieve prime 7 in the bit array bitsieve[threadIdx.x] |= _step7[i]; // compute starting offset for prime 11: i = (bid * 256 + threadIdx.x) % 11; // then sieve prime 11 in the bit array bitsieve[threadIdx.x] |= _step11[i]; // compute starting offset for prime 13: i = (bid * 256 + threadIdx.x) % 13; // then sieve prime 13 in the bit array bitsieve[threadIdx.x] |= _step13[i]; // compute starting offset for prime 17: i = (bid * 256 + threadIdx.x) % 17; // then sieve prime 17 in the bit array bitsieve[threadIdx.x] |= _step17[i]; // compute starting offset for prime 19: i = (bid * 256 + threadIdx.x) % 19; // then sieve prime 19 in the bit array bitsieve[threadIdx.x] |= _step19[i]; // regroup before sieving __syncthreads(); // now sieve the array for (j=0; j<nump; j++) { int pid = (j * threadsPerBlock) + threadIdx.x + startprime; if (pid < maxp) { uint32 p = primes[pid]; uint32 pstart = p/3; uint32 p2 = 2*p; uint32 block_start = bid * block_size; uint32 start_offset; uint32 s[2]; // the wheel sieve with all multiples of 2 and 3 removed from the array is equivalent to // alternately stepping through the number line by (p+2)*mult, (p-2)*mult, // where mult = (p+1)/6 s[0] = p+(2*((p+1)/6)); s[1] = p-(2*((p+1)/6)); // compute the starting location of this prime in this block if ((bid == 0) || (pstart >= block_start)) { // start one increment past the starting value of p/3, since // we want to count the prime itself as a prime. start_offset = pstart + s[0] - block_start; k = 1; } else { // measure how far the start of this block is from where the prime first landed, // as well as how many complete (+2/-2) steps it would need to take // to cover that distance uint32 dist = (block_start - pstart); uint32 steps = dist / p2; if ((dist % p2) == 0) { // if the number of steps is exact, then we hit the start // of this block exactly, and we start below with the +2 step. start_offset = 0; k = 0; } else { uint32 inc = pstart + steps * p2 + s[0]; if (inc >= block_start) { // if the prime reaches into this block on the first stride, // then start below with the -2 step start_offset = inc - block_start; k = 1; } else { // we need both +2 and -2 strides to get into the block, // so start below with the +2 stride. start_offset = inc + s[1] - block_start; k = 0; } } } // unroll the loop for the smallest primes. if (p < 1024) { uint32 stop = block_size - (2 * p * 4); if (k == 0) { for(i=start_offset ;i < stop; i+=8*p) { locsieve[i] = 0; locsieve[i+s[0]] = 0; locsieve[i+p2] = 0; locsieve[i+p2+s[0]] = 0; locsieve[i+4*p] = 0; locsieve[i+4*p+s[0]] = 0; locsieve[i+6*p] = 0; locsieve[i+6*p+s[0]] = 0; } } else { for(i=start_offset ;i < stop; i+=8*p) { locsieve[i] = 0; locsieve[i+s[1]] = 0; locsieve[i+p2] = 0; locsieve[i+p2+s[1]] = 0; locsieve[i+4*p] = 0; locsieve[i+4*p+s[1]] = 0; locsieve[i+6*p] = 0; locsieve[i+6*p+s[1]] = 0; } } } else i=start_offset; // alternate stepping between the large and small strides this prime takes. 
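			// Concrete stride example (illustrative): for p = 23, (p+1)/6 = 4,
			// so s[0] = 31 and s[1] = 15; the two strides sum to 2p = 46 and
			// correspond to the alternating gaps of 4p and 2p between
			// multiples of p coprime to 6, mapped into the /3-compressed
			// index space.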
for( ;i < block_size; k = !k) { locsieve[i] = 0; i += s[k]; } } } // regroup before counting __syncthreads(); for (j=threadIdx.x * range, k=0; k<range; k++) locsieve[j + k] = (locsieve[j+k] & ((bitsieve[(j+k) >> 5] & (1 << ((j+k) & 31))) == 0)); __syncthreads(); if(threadIdx.x == 0) for (k=0; k < block_size; k++) { j = ((bid * block_size + k) * 3 + 1) >> 1; if(j < N >> 1) results[j] = locsieve[k]; } } uint32 tiny_soe(uint32 limit, uint32 *primes) { //simple sieve of erathosthenes for small limits - not efficient //for large limits. uint8 *flags; uint16 prime; uint32 i,j; int it; //allocate flags flags = (uint8 *)malloc(limit/2 * sizeof(uint8)); if (flags == NULL) printf("error allocating flags\n"); memset(flags,1,limit/2); //find the sieving primes, don't bother with offsets, we'll need to find those //separately for each line in the main sieve. primes[0] = 2; it=1; //sieve using primes less than the sqrt of the desired limit //flags are created only for odd numbers (mod2) for (i=1;i<(uint32)(sqrt((double)limit)/2+1);i++) { if (flags[i] > 0) { prime = (uint32)(2*i + 1); for (j=i+prime;j<limit/2;j+=prime) flags[j]=0; primes[it]=prime; it++; } } //now find the rest of the prime flags and compute the sieving primes for (;i<limit/2;i++) { if (flags[i] == 1) { primes[it] = (uint32)(2*i + 1); it++; } } free(flags); return it; } int gtpr(int n, uint8* bprimes) { uint32 Nsmall = (uint32) sqrt((double) n); int numblocks; int primes_per_thread; uint32* primes; uint32* device_primes; uint32 np; uint8* results; // find seed primes primes = (uint32*)malloc(Nsmall*sizeof(uint32)); np = tiny_soe(Nsmall, primes); // put the primes on the device cudaMalloc((void**) &device_primes, sizeof(uint32) * np); cudaMemcpy(device_primes, primes, sizeof(uint32)*np, cudaMemcpyHostToDevice); // compute how many whole blocks we have to sieve and how many primes each // thread will be responsible for. numblocks = (n / 3 / block_size + 1); primes_per_thread = ((np - startprime) + threadsPerBlock - 1) / threadsPerBlock; dim3 grid((uint32)sqrt((double)numblocks)+1,(uint32)sqrt((double)numblocks)+1); cudaMalloc((void**) &results, sizeof(uint8) * (n >> 1)); cudaMemset(results, 0, sizeof(uint8) * (n >> 1)); SegSieve<<<grid, threadsPerBlock, 0>>>(device_primes, np, primes_per_thread, n, results); cudaThreadSynchronize(); cudaMemcpy (bprimes, results, sizeof (uint8) * (n >> 1), cudaMemcpyDeviceToHost); cudaFree(device_primes); cudaFree(results); free(primes); return 0; } /************************************************************** * * FFT and other related Functions * **************************************************************/ /* rint is not ANSI compatible, so we need a definition for * WIN32 and other platforms with rint. 
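 * (As a side note, one common portable substitute -- not necessarily the one
 *  used here -- is the "magic constant" trick: adding and then subtracting
 *  6755399441055744.0, i.e. 3 * 2^51, rounds a double to the nearest integer
 *  under the default round-to-nearest mode, for magnitudes below about 2^51.)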
* Also we use that to write the trick to rint() */ /**************************************************************************** * Lucas Test - specific routines * ***************************************************************************/ void reset_err(float* maxerr, float value) { *maxerr *= value; cutilSafeCall (cudaMemcpy (g_err, maxerr, sizeof (float), cudaMemcpyHostToDevice)); } float lucas_square (/*double *x,*/ int q, int n, int iter, int last, float* maxerr, int error_flag, int bit, int stage, int chkpt) { float terr = 0.0; if (iter < 100 && iter % 10 == 0) { cutilSafeCall (cudaMemcpy (&terr, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(terr > *maxerr) *maxerr = terr; } cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); square <<< n / (4 * threads2), threads2 >>> (n, g_x, g_ct); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); if(!bit) { norm1a <<<n / (2 * threads1), threads1 >>> (g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, *maxerr, chkpt); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_x, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, chkpt); } else { norm1b <<<n / (2 * threads1), threads1 >>> (g_x, g_datal, g_xint, g_ttmp, g_carryl, g_err, *maxerr, chkpt); norm2b <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_x, g_xint, n, threads1, g_datal, g_carryl, g_ttp1, chkpt); } if (error_flag) { cutilSafeCall (cudaMemcpy (&terr, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(terr > *maxerr) *maxerr = terr; } else if (polite_f && (iter % polite) == 0) cutilSafeThreadSync(); return (terr); } void init_x_int(int *x_int, unsigned *x_packed, int q, int n, int *stage) { int j; if(*stage == 0) { *stage = 1; for(j = 0; j < n; j++) x_int[j] = 0; x_int[0] = 1; if(x_packed) { for(j = 0; j < (q + 31) /32; j++) x_packed[j] = 0; x_packed[0] = 1; } } cudaMemcpy (g_xint, x_int, sizeof (int) * n , cudaMemcpyHostToDevice); } void E_init_d(double *g, double value, int n) { double x[1] = {value}; cutilSafeCall (cudaMemset (g, 0.0, sizeof (double) * n)); cudaMemcpy (g, x, sizeof (double) , cudaMemcpyHostToDevice); } void E_pre_mul(double *g_out, double *g_in, int n, int fft_f) { if(fft_f) cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_in, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); pre_mul <<<n / (4 * threads2), threads2>>> (n, g_out, g_ct); } void E_mul(double *g_out, double *g_in1, double *g_in2, int n, float err, int fft_f) { if(fft_f) cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_in1, (cufftDoubleComplex *) g_in1, CUFFT_INVERSE)); mult3 <<<n / (4 * threads2), threads2>>> (g_out, g_in1, g_in2, g_ct, n); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); } void E_sub_mul(double *g_out, double *g_in1, double *g_in2, double *g_in3, int n, float err, int chkpt) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_in1, (cufftDoubleComplex *) g_in1, CUFFT_INVERSE)); sub_mul <<<n / (4 * threads2), threads2>>> (g_out, g_in1, g_in2, g_in3, g_ct, n); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> 
(g_out, g_datai, &g_xint[n], g_ttmp, g_carryi, g_err, err, chkpt); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, &g_xint[n], n, threads1, g_datai, g_carryi, g_ttp1, chkpt); } void E_half_mul(double *g_out, double *g_in1, double *g_in2, int n, float err) { mult2 <<<n / (4 * threads2), threads2>>> (g_out, g_in1, g_in2, g_ct, n); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); } int E_to_the_p(double *g_out, double *g_in, mpz_t p, int n, int trans, float *err) { // Assume g_in is premultiplied int last, j; int checksync = trans / (2 * 50) * 2 * 50; int checkerror = trans / (200) * 200; int checksave = trans / (2 * checkpoint_iter) * 2 * checkpoint_iter; int sync = 1; last = mpz_sizeinbase (p, 2); if (last == 1) { E_init_d(g_out, 1.0, n); if(mpz_tstbit (p, last - 1)) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); mult2 <<< n / (4 * threads2), threads2 >>> (g_out, g_out, g_in, g_ct, n); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } return trans; } square1 <<< n / (4 * threads2), threads2 >>> (n, g_out, g_in, g_ct); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; cutilSafeCall (cudaMemcpy (err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(mpz_tstbit (p, last - 2)) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); mult2 <<< n / (4 * threads2), threads2 >>> (g_out, g_out, g_in, g_ct, n); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } for(j = 3; j <= last && !quitting; j++) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); square <<< n / (4 * threads2), threads2 >>> (n, g_out, g_ct); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; if(mpz_tstbit (p, last - j)) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); mult2 <<< n / (4 * threads2), threads2 >>> (g_out, g_out, g_in, g_ct, n); 
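      // Bit (last - j) of the exponent is set, so this iteration also folds in
      // one multiply by the pre-transformed base g_in (left-to-right
      // square-and-multiply); the inverse transform and carry normalization
      // follow below, adding two more transforms to the running count.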
cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_out, (cufftDoubleComplex *) g_out, CUFFT_INVERSE)); norm1a <<<n / (2 * threads1), threads1 >>> (g_out, g_datai, g_xint, g_ttmp, g_carryi, g_err, *err, 0); norm2a <<< (n / (2 * threads1) + threads3 - 1) / threads3, threads3 >>> (g_out, g_xint, n, threads1, g_datai, g_carryi, g_ttp1, 0); trans += 2; } if(trans - checkerror > 200) { sync = 0; checkerror += 200; cutilSafeCall (cudaMemcpy (err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(*err > 0.4) quitting = 2; } if(trans - checksave > 2 * checkpoint_iter) { checksave += 2 * checkpoint_iter; reset_err(err, 0.85f); } if(sync && polite_f && trans - checksync > 2 * polite) { checksync += 2 * polite; cutilSafeThreadSync(); } sync = 1; fflush(NULL); } return trans; } /* -------- initializing routines -------- */ void makect (int nc, double *c) { int j; double d = (double) (nc << 1); for (j = 1; j <= nc; j++) c[j] = 0.5 * cospi (j / d); } void alloc_gpu_mem(int n) { cufftSafeCall (cufftPlan1d (&plan, n / 2, CUFFT_Z2Z, 1)); cutilSafeCall (cudaMalloc ((void **) &g_x, sizeof (double) * n )); cutilSafeCall (cudaMalloc ((void **) &g_ct, sizeof (double) * n / 4)); cutilSafeCall (cudaMalloc ((void **) &g_xint, sizeof (int) * 2 * n )); cutilSafeCall (cudaMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (cudaMalloc ((void **) &g_ttmp, sizeof (double) * n)); cutilSafeCall (cudaMalloc ((void **) &g_ttp1, sizeof (double) * 2 * n / threads1)); cutilSafeCall (cudaMalloc ((void **) &g_datai, sizeof (int) * 2 * n / threads1)); cutilSafeCall (cudaMalloc ((void **) &g_datal, sizeof (long long int) * 2 * n / threads1)); cutilSafeCall (cudaMemset (g_err, 0, sizeof (float))); cutilSafeCall (cudaMalloc ((void **) &g_carryl, sizeof (long long int) * n / threads1)); cutilSafeCall (cudaMalloc ((void **) &g_carryi, sizeof (int) * n / threads1)); } void write_gpu_data(int q, int n) { double *s_ttmp, *s_ttp1, *s_ct; int i, j, qn = q / n, b = q % n; int a, c, bj; double *h_ttp_inc; int *h_qn; s_ct = (double *) malloc (sizeof (double) * (n / 4)); s_ttmp = (double *) malloc (sizeof (double) * (n)); s_ttp1 = (double *) malloc (sizeof (double) * 2 * (n / threads1)); size = (char *) malloc (sizeof (char) * n); h_ttp_inc = (double *) malloc (sizeof (double) * 2); h_qn = (int *) malloc (sizeof (int) * 2); c = n - b; bj = 0; for (j = 1; j < n; j++) { bj += b; bj %= n; a = bj - n; if(j % 2 == 0) s_ttmp[j] = exp2 (a / (double) n) * 2.0 / n; else s_ttmp[j] = exp2 (-a / (double) n); size[j] = (bj >= c); if(size[j]) s_ttmp[j] = -s_ttmp[j]; } size[0] = 1; s_ttmp[0] = -2.0 / n; size[n-1] = 0; s_ttmp[n-1] = -s_ttmp[n-1]; for (i = 0, j = 0; i < n ; i += 2 * threads1) { s_ttp1[j] = abs(s_ttmp[i + 1]); if(size[i]) s_ttp1[j] = -s_ttp1[j]; j++; } makect (n / 4, s_ct); h_ttp_inc[0] = -exp2((b-n) / (double) n); h_ttp_inc[1] = -exp2(b / (double) n); set_ttp_inc(h_ttp_inc); h_qn[0] = qn; h_qn[1] = n; set_qn(h_qn); cutilSafeCall(cudaMemcpy (g_ttmp, s_ttmp, sizeof (double) * n, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy (g_ttp1, s_ttp1, sizeof (double) * 2 * n / threads1, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy (g_ct, s_ct, sizeof (double) * (n / 4), cudaMemcpyHostToDevice)); free ((char *) s_ct); free ((char *) s_ttmp); free ((char *) s_ttp1); free ((char *) h_ttp_inc); free ((char *) h_qn); } void free_host (int *x_int) { free ((char *) size); free ((char *) x_int); } void free_gpu(void) { cufftSafeCall (cufftDestroy (plan)); cutilSafeCall (cudaFree ((char *) g_x)); cutilSafeCall (cudaFree ((char *) g_ct)); 
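  // Note: g_xint (freed below) was allocated as 2 * n ints -- E_sub_mul writes
  // a second integer image at &g_xint[n] -- so one cudaFree releases both halves.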
cutilSafeCall (cudaFree ((char *) g_xint)); cutilSafeCall (cudaFree ((char *) g_err)); cutilSafeCall (cudaFree ((char *) g_ttp1)); cutilSafeCall (cudaFree ((char *) g_ttmp)); cutilSafeCall (cudaFree ((char *) g_datai)); cutilSafeCall (cudaFree ((char *) g_datal)); cutilSafeCall (cudaFree ((char *) g_carryl)); cutilSafeCall (cudaFree ((char *) g_carryi)); } void close_lucas (int *x_int) { free_host(x_int); free_gpu(); } /************************************************************************** * * * End LL/GPU Functions, Begin Utility/CPU Functions * * * **************************************************************************/ void init_threads(int n) { FILE *threads; char buf[132]; char threadfile[32]; int no_file = 0, no_entry = 1; int th1 = 0, th2 = 0, th3 = 0; int temp_n; sprintf (threadfile, "%s threads.txt", dev.name); threads = fopen(threadfile, "r"); if(threads) { while(fgets(buf, 132, threads) != NULL) { sscanf(buf, "%d %d %d %d", &temp_n, &th1, &th2, &th3); if(n == temp_n * 1024) { threads1 = th1; threads2 = th2; threads3 = th3; no_entry = 0; } } } else no_file = 1; if(no_file || no_entry) { if(no_file) printf("No %s file found. Using default thread sizes.\n", threadfile); else if(no_entry) printf("No entry for fft = %dk found. Using default thread sizes.\n", n / 1024); printf("For optimal thread selection, please run\n"); printf("./CUDAPm1 -cufftbench %d %d r\n", n / 1024, n / 1024); printf("for some small r, 0 < r < 6 e.g.\n"); fflush(NULL); } return; } int init_ffts() { //#define COUNT 139 FILE *fft; char buf[132]; int next_fft, j = 0, i = 0; int first_found = 0; #define COUNT 160 int default_mult[COUNT] = { //this batch from GTX570 timings 2, 8, 10, 14, 16, 18, 20, 32, 36, 42, 48, 50, 56, 60, 64, 70, 80, 84, 96, 112, 120, 126, 128, 144, 160, 162, 168, 180, 192, 224, 256, 288, 320, 324, 336, 360, 384, 392, 400, 448, 512, 576, 640, 648, 672, 720, 768, 784, 800, 864, 896, 900, 1024, 1152, 1176, 1280, 1296, 1344, 1440, 1568, 1600, 1728, 1792, 2048, 2160, 2304, 2352, 2592, 2688, 2880, 3024, 3136, 3200, 3584, 3600, 4096, 4320, 4608, 4704, 5120, 5184, 5600, 5760, 6048, 6144, 6272, 6400, 6480, 7168, 7200, 7776, 8064, 8192, 8640, 9216, 9408, 10240, 10368, 10584, 10800, 11200, 11520, 12096, 12288, 12544, 12960, 13824, 14336, 14400, 16384, 17496, 18144, 19208, 19600, 20000, 20250, 21952, 23328, 23814, 24300, 24500, 25088, 25600, 26244, 27000, 27216, 28000, 28672, 31104, 31250, 32000, 32400, 32768, 33614, 34992, 36000, 36288, 38416, 39200, 39366, 40500, 41472, 42336, 43200, 43904, 47628, 49000, 50000, 50176, 51200, 52488, 54432, 55296, 56000, 57344, 60750, 62500, 64000, 64800, 65536 }; char fftfile[32]; sprintf (fftfile, "%s fft.txt", dev.name); fft = fopen(fftfile, "r"); if(!fft) { printf("No %s file found. 
Using default fft lengths.\n", fftfile); printf("For optimal fft selection, please run\n"); printf("./CUDAPm1 -cufftbench 1 8192 r\n"); printf("for some small r, 0 < r < 6 e.g.\n"); fflush(NULL); for(j = 0; j < COUNT; j++) multipliers[j] = default_mult[j]; } else { while(fgets(buf, 132, fft) != NULL) { int le = 0; sscanf(buf, "%d", &le); if(next_fft = atoi(buf)) { if(!first_found) { while(i < COUNT && default_mult[i] < next_fft) { multipliers[j] = default_mult[i]; i++; j++; } multipliers[j] = next_fft; j++; first_found = 1; } else { multipliers[j] = next_fft; j++; } } } while(default_mult[i] < multipliers[j - 1] && i < COUNT) i++; while(i < COUNT) { multipliers[j] = default_mult[i]; j++; i++; } fclose(fft); } return j; } int choose_fft_length (int q, int *index) { /* In order to increase length if an exponent has a round off issue, we use an extra paramter that we can adjust on the fly. In check(), index starts as -1, the default. In that case, choose from the table. If index >= 0, we must assume it's an override index and return the corresponding length. If index > table-count, then we assume it's a manual fftlen and return the proper index. */ if( 0 < *index && *index < fft_count ) return 1024*multipliers[*index]; else if( *index >= fft_count || q == 0) { /* override with manual fftlen passed as arg; set pointer to largest index <= fftlen */ int len, i; for(i = fft_count - 1; i >= 0; i--) { len = 1024*multipliers[i]; if( len <= *index ) { *index = i; return len; /* not really necessary, but now we could decide to override fftlen with this value */ } } } else { // *index < 0, not override, choose length and set pointer to proper index int i; int estimate = ceil(1.01 * 0.0000358738168878758 * exp (1.0219834608 * log ((double) q))); for(i = 0; i < fft_count; i++) { if(multipliers[i] >= estimate) { *index = i; printf("Index %d\n",*index); return multipliers[i] * 1024; } } } return 0; } int fft_from_str(const char* str) /* This is really just strtoul with some extra magic to deal with K or M */ { char* endptr; const char* ptr = str; int len, mult = 0; while( *ptr ) { if( *ptr == 'k' || *ptr == 'K' ) { mult = 1024; break; } if( *ptr == 'm' || *ptr == 'M' ) { mult = 1024*1024; break; } ptr++; } if( !mult ) { // No K or M, treat as before (PS The Python else clause on loops I mention in parse.c would be useful here :) ) mult = 1; } len = (int) strtoul(str, &endptr, 10)*mult; if( endptr != ptr ) { // The K or M must directly follow the num (or the num must extend to the end of the str) fprintf (stderr, "can't parse fft length \"%s\"\n\n", str); exit (2); } return len; } //From apsen void print_time_from_seconds (int sec) { if (sec > 3600) { printf ("%d", sec / 3600); sec %= 3600; printf (":%02d", sec / 60); } else printf ("%d", sec / 60); sec %= 60; printf (":%02d", sec); } void init_device (int device_number) { int device_count = 0; cudaGetDeviceCount (&device_count); if (device_number >= device_count) { printf ("device_number >= device_count ... 
exiting\n"); printf ("(This is probably a driver problem)\n\n"); exit (2); } cudaSetDevice (device_number); cudaSetDeviceFlags (cudaDeviceBlockingSync); cudaGetDeviceProperties (&dev, device_number); // From Iain if (dev.major == 1 && dev.minor < 3) { printf("A GPU with compute capability >= 1.3 is required for double precision arithmetic\n\n"); exit (2); } if (d_f) { printf ("------- DEVICE %d -------\n", device_number); printf ("name %s\n", dev.name); printf ("Compatibility %d.%d\n", dev.major, dev.minor); printf ("clockRate (MHz) %d\n", dev.clockRate/1000); printf ("memClockRate (MHz) %d\n", dev.memoryClockRate/1000); #ifdef _MSC_VER printf ("totalGlobalMem %Iu\n", dev.totalGlobalMem); #else printf ("totalGlobalMem %zu\n", dev.totalGlobalMem); #endif #ifdef _MSC_VER printf ("totalConstMem %Iu\n", dev.totalConstMem); #else printf ("totalConstMem %zu\n", dev.totalConstMem); #endif printf ("l2CacheSize %d\n", dev.l2CacheSize); #ifdef _MSC_VER printf ("sharedMemPerBlock %Iu\n", dev.sharedMemPerBlock); #else printf ("sharedMemPerBlock %zu\n", dev.sharedMemPerBlock); #endif printf ("regsPerBlock %d\n", dev.regsPerBlock); printf ("warpSize %d\n", dev.warpSize); #ifdef _MSC_VER printf ("memPitch %Iu\n", dev.memPitch); #else printf ("memPitch %zu\n", dev.memPitch); #endif printf ("maxThreadsPerBlock %d\n", dev.maxThreadsPerBlock); printf ("maxThreadsPerMP %d\n", dev.maxThreadsPerMultiProcessor); printf ("multiProcessorCount %d\n", dev.multiProcessorCount); printf ("maxThreadsDim[3] %d,%d,%d\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]); printf ("maxGridSize[3] %d,%d,%d\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]); #ifdef _MSC_VER printf ("textureAlignment %Iu\n", dev.textureAlignment); #else printf ("textureAlignment %zu\n", dev.textureAlignment); #endif printf ("deviceOverlap %d\n\n", dev.deviceOverlap); } } void rm_checkpoint (int q, int ks1) { char chkpnt_cfn[32]; char chkpnt_tfn[32]; if(!ks1) { sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); (void) unlink (chkpnt_cfn); (void) unlink (chkpnt_tfn); } sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); (void) unlink (chkpnt_cfn); (void) unlink (chkpnt_tfn); } int standardize_digits_int (int *x_int, int q, int n, int offset, int num_digits) { int j, digit, stop, qn = q / n, carry = 0; int temp; int lo = 1 << qn; int hi = lo << 1; digit = floor(offset * (n / (double) q)); j = (n + digit - 1) % n; while(x_int[j] == 0 && j != digit) j = (n + j - 1) % n; if(j == digit && x_int[digit] == 0) return(1); else if (x_int[j] < 0) carry = -1; { stop = (digit + num_digits) % n; j = digit; do { x_int[j] += carry; carry = 0; if (size[j]) temp = hi; else temp = lo; if(x_int[j] < 0) { x_int[j] += temp; carry = -1; } j = (j + 1) % n; } while(j != stop); } return(0); } void balance_digits_int(int* x, int q, int n) { int half_low = (1 << (q / n - 1)); int low = half_low << 1; int high = low << 1; int upper, adj, carry = 0; int j; for(j = 0; j < n; j++) { if(size[j]) { upper = low; adj = high; } else { upper = half_low; adj = low; } x[j] += carry; carry = 0; if(x[j] >= upper) { x[j] -= adj; carry = 1; } } x[0] += carry; // Good enough for our purposes. 
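  // (This routine converts each variable-width digit from the unsigned range
  //  [0, base) to the balanced range [-base/2, base/2): whenever a digit
  //  reaches the upper half, subtract the full base and carry 1 into the next
  //  digit, which keeps the magnitudes fed to the floating-point FFT small.
  //  The final wrap-around carry is simply folded back into digit 0 above.)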
} unsigned * read_checkpoint_packed (int q) { //struct stat FileAttrib; FILE *fPtr; unsigned *x_packed; char chkpnt_cfn[32]; char chkpnt_tfn[32]; int end = (q + 31) / 32; x_packed = (unsigned *) malloc (sizeof (unsigned) * (end + 25)); sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); fPtr = fopen (chkpnt_cfn, "rb"); if (!fPtr) { //#ifndef _MSC_VER //if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the checkpoint file. Trying the backup file.\n"); //#endif } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose(fPtr); } else { fclose(fPtr); return x_packed; } fPtr = fopen(chkpnt_tfn, "rb"); if (!fPtr) { //#ifndef _MSC_VER // if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the backup file. Restarting test.\n"); //#endif } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose(fPtr); } else { fclose(fPtr); return x_packed; } x_packed[end] = q; x_packed[end + 1] = 0; // n x_packed[end + 2] = 1; // iteration number x_packed[end + 3] = 0; // stage x_packed[end + 4] = 0; // accumulated time x_packed[end + 5] = 0; // b1 // 6-9 reserved for extending b1 // 10-24 reserved for stage 2 int i; for(i = 6; i < 25; i++) x_packed[end + i] = 0; return x_packed; } int read_st2_checkpoint (int q, unsigned *x_packed) { //struct stat FileAttrib; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; int end = (q + 31) / 32; sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); fPtr = fopen (chkpnt_cfn, "rb"); if (!fPtr) { // if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the checkpoint file. Trying the backup file.\n"); } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe checkpoint appears to be corrupt. Trying the backup file.\n"); fclose(fPtr); } else { fclose(fPtr); return 1; } fPtr = fopen(chkpnt_tfn, "rb"); if (!fPtr) { //if(stat(chkpnt_cfn, &FileAttrib) == 0) fprintf (stderr, "\nUnable to open the backup file. Restarting test.\n"); } else if (fread (x_packed, 1, sizeof (unsigned) * (end + 25) , fPtr) != (sizeof (unsigned) * (end + 25))) { fprintf (stderr, "\nThe backup appears to be corrupt. Restarting test.\n"); fclose (fPtr); } else if(x_packed[end] != (unsigned int) q) { fprintf (stderr, "\nThe backup appears to be corrupt. 
Restarting test.\n");; fclose(fPtr); } else { fclose(fPtr); return 1; } return 0; } void pack_bits_int(int *x_int, unsigned *packed_x, int q , int n) { unsigned long long temp1, temp2 = 0; int i, j = 0, k = 0; int qn = q / n; for(i = 0; i < n; i++) { temp1 = x_int[i]; temp2 += (temp1 << k); k += qn + size[i]; if(k >= 32) { packed_x[j] = (unsigned) temp2; temp2 >>= 32; k -= 32; j++; } } packed_x[j] = (unsigned) temp2; } void set_checkpoint_data(unsigned *x_packed, int q, int n, int j, int stage, int time) { int end = (q + 31) / 32; x_packed[end + 0] = q; x_packed[end + 1] = n; x_packed[end + 2] = j; x_packed[end + 3] = stage; x_packed[end + 4] = time; } void write_checkpoint_packed (unsigned *x_packed, int q) { int end = (q + 31) / 32; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; sprintf (chkpnt_cfn, "c%ds1", q); sprintf (chkpnt_tfn, "t%ds1", q); (void) unlink (chkpnt_tfn); (void) rename (chkpnt_cfn, chkpnt_tfn); fPtr = fopen (chkpnt_cfn, "wb"); if (!fPtr) { fprintf(stderr, "Couldn't write checkpoint.\n"); return; } fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); if (s_f > 0) // save all checkpoint files { char chkpnt_sfn[64]; #ifndef _MSC_VER sprintf (chkpnt_sfn, "%s/s" "%d.%d.%s", folder, q, x_packed[end + 2], s_residue); #else sprintf (chkpnt_sfn, "%s\\s" "%d.%d.%s.txt", folder, q, x_packed[end + 2], s_residue); #endif fPtr = fopen (chkpnt_sfn, "wb"); if (!fPtr) return; fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); } } void write_st2_checkpoint (unsigned *x_packed, int q) { int end = (q + 31) / 32; FILE *fPtr; char chkpnt_cfn[32]; char chkpnt_tfn[32]; sprintf (chkpnt_cfn, "c%ds2", q); sprintf (chkpnt_tfn, "t%ds2", q); (void) unlink (chkpnt_tfn); (void) rename (chkpnt_cfn, chkpnt_tfn); fPtr = fopen (chkpnt_cfn, "wb"); if (!fPtr) { fprintf(stderr, "Couldn't write checkpoint.\n"); return; } fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); if (s_f > 0) // save all checkpoint files { char chkpnt_sfn[64]; #ifndef _MSC_VER sprintf (chkpnt_sfn, "%s/s" "%d.%d.%s", folder, q, x_packed[end + 2], s_residue); #else sprintf (chkpnt_sfn, "%s\\s" "%d.%d.%s.txt", folder, q, x_packed[end + 2], s_residue); #endif fPtr = fopen (chkpnt_sfn, "wb"); if (!fPtr) return; fwrite (x_packed, 1, sizeof (unsigned) * (end + 25), fPtr); fclose (fPtr); } } int printbits_int (int *x_int, int q, int n, int offset, FILE* fp, char *expectedResidue, int o_f) { int j, k = 0; int digit, bit; unsigned long long temp, residue = 0; digit = floor(offset * (n / (double) q)); bit = offset - ceil(digit * (q / (double) n)); j = digit; while(k < 64) { temp = x_int[j]; residue = residue + (temp << k); k += q / n + size[j % n]; if(j == digit) { k -= bit; residue >>= bit; } j = (j + 1) % n; } sprintf (s_residue, "%016llx", residue); printf ("M%d, 0x%s,", q, s_residue); //if(o_f) printf(" offset = %d,", offset); printf (" n = %dK, %s", n/1024, program); if (fp) { fprintf (fp, "M%d, 0x%s,", q, s_residue); if(o_f) fprintf(fp, " offset = %d,", offset); fprintf (fp, " n = %dK, %s", n/1024, program); } return 0; } void unpack_bits_int(int *x_int, unsigned *packed_x, int q , int n) { unsigned long long temp1, temp2 = 0; int i, j = 0, k = 0; int qn = q / n; int mask1 = -1 << (qn + 1); int mask2; int mask; mask1 = ~mask1; mask2 = mask1 >> 1; for(i = 0; i < n; i++) { if(k < qn + size[i]) { temp1 = packed_x[j]; temp2 += (temp1 << k); k += 32; j++; } if(size[i]) mask = mask1; else mask = mask2; x_int[i] = ((int) temp2) & mask; temp2 >>= (qn + size[i]); k -= (qn 
+ size[i]); } } int* init_lucas_packed_int(unsigned * x_packed, int q , int *n, int *j, int *stage, int *total_time) { int *x; int new_n, old_n; int end = (q + 31) / 32; int new_test = 0; *n = x_packed[end + 1]; if(*n == 0) new_test = 1; *j = x_packed[end + 2]; *stage = x_packed[end + 3]; if(total_time) *total_time = x_packed[end + 4]; old_n = fftlen; if(fftlen == 0) fftlen = *n; new_n = choose_fft_length(q, &fftlen); if(old_n > fft_count) *n = old_n; else if (new_test || old_n) *n = new_n; init_threads(*n); printf("Using threads: norm1 %d, mult %d, norm2 %d.\n", threads1, threads2, threads3); if ((*n / (2 * threads1)) > dev.maxGridSize[0]) { fprintf (stderr, "over specifications Grid = %d\n", (int) *n / (2 * threads1)); fprintf (stderr, "try increasing norm1 threads (%d) or decreasing FFT length (%dK)\n\n", threads1, *n / 1024); return NULL; } if ((*n / (4 * threads2)) > dev.maxGridSize[0]) { fprintf (stderr, "over specifications Grid = %d\n", (int) *n / (4 * threads2)); fprintf (stderr, "try increasing mult threads (%d) or decreasing FFT length (%dK)\n\n", threads2, *n / 1024); return NULL; } if ((*n % (2 * threads1)) != 0) { fprintf (stderr, "fft length %d must be divisible by 2 * norm1 threads %d\n", *n, threads1); return NULL; } if ((*n % (4 * threads2)) != 0) { fprintf (stderr, "fft length %d must be divisible by 4 * mult threads %d\n", *n, threads2); return NULL; } if (q < *n * 0.8 * log((double) *n)) { fprintf (stderr, "The fft length %dK is too large for the exponent %d. Restart with smaller fft.\n", *n / 1024, q); return NULL; } x = (int *) malloc (sizeof (int) * *n); alloc_gpu_mem(*n); write_gpu_data(q, *n); if(!new_test) { unpack_bits_int(x, x_packed, q, *n); balance_digits_int(x, q, *n); } init_x_int(x, x_packed, q, *n, stage); apply_weights <<<*n / (2 * threads1), threads1>>> (g_x, g_xint, g_ttmp); return x; } int isReasonable(int fft) { //From an idea of AXN's mentioned on the forums int i; while(!(fft & 1)) fft /= 2; for(i = 3; i <= 7; i += 2) while((fft % i) == 0) fft /= i; return (fft); } void threadbench (int n, int passes, int device_number) { float total[216] = {0.0f}, outerTime, maxerr = 0.5f; int threads[6] = {32, 64, 128, 256, 512, 1024}; int t1, t2, t3, i; float best_time = 10000.0f; int best_t1 = 0, best_t2 = 0, best_t3 = 0; int pass; cudaEvent_t start, stop; printf("CUDA bench, testing various thread sizes for fft %dK, doing %d passes.\n", n, passes); fflush(NULL); n *= 1024; cutilSafeCall (cudaMalloc ((void **) &g_x, sizeof (double) * n)); cutilSafeCall (cudaMemset (g_x, 0, sizeof (double) * n)); cutilSafeCall (cudaMalloc ((void **) &g_ttmp, sizeof (double) * n)); cutilSafeCall (cudaMemset (g_ttmp, 0, sizeof (double) * n)); cutilSafeCall (cudaMalloc ((void **) &g_ct, sizeof (double) * n / 4)); cutilSafeCall (cudaMemset (g_ct, 0, sizeof (double) * n / 4)); cutilSafeCall (cudaMalloc ((void **) &g_ttp1, sizeof (double) * n / 32)); cutilSafeCall (cudaMalloc ((void **) &g_datai, sizeof (int) * n / 32)); cutilSafeCall (cudaMalloc ((void **) &g_carryi, sizeof (int) * n / 64)); cutilSafeCall (cudaMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (cudaMemset (g_err, 0, sizeof (float))); cutilSafeCall (cudaEventCreate (&start)); cutilSafeCall (cudaEventCreateWithFlags (&stop, cudaEventBlockingSync)); cufftSafeCall (cufftPlan1d (&plan, n / 2, CUFFT_Z2Z, 1)); for(t1 = 0; t1 < 6; t1++) { if(n / (2 * threads[t1]) <= dev.maxGridSize[0] && n % (2 * threads[t1]) == 0) { for (t2 = 0; t2 < 6; t2++) { if(n / (4 * threads[t2]) <= dev.maxGridSize[0] && n % (4 * threads[t2]) 
== 0) { for (t3 = 0; t3 < 6; t3++) { for(pass = 1; pass <= passes; pass++) { cutilSafeCall (cudaEventRecord (start, 0)); for (i = 0; i < 50; i++) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); square <<< n / (4 * threads[t2]), threads[t2] >>> (n, g_x, g_ct); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); norm1a <<< n / (2 * threads[t1]), threads[t1] >>> (g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, maxerr, 0); norm2a <<< (n / (2 * threads[t1]) + threads[t3] - 1) / threads[t3], threads[t3] >>> (g_x, g_xint, n, threads[t1], g_datai, g_carryi, g_ttp1, 0); } cutilSafeCall (cudaEventRecord (stop, 0)); cutilSafeCall (cudaEventSynchronize (stop)); cutilSafeCall (cudaEventElapsedTime (&outerTime, start, stop)); outerTime /= 50.0f; total[36 * t1 + 6 * t2 + t3] += outerTime; //if(outerTime > max_diff[i]) max_diff[i] = outerTime; } printf ("fft size = %dK, ave time = %2.4f msec, Norm1 threads %d, Mult threads %d, Norm2 threads %d\n", n / 1024 , total[36 * t1 + 6 * t2 + t3] / passes, threads[t1], threads[t2], threads[t3]); fflush(NULL); } } } } } for (i = 0; i < 216; i++) { if(total[i] < best_time && total[i] > 0.0f) { int j = i; best_time = total[i]; best_t3 = j % 6; j /= 6; best_t2 = j % 6; best_t1 = j / 6; } } printf("\nBest time for fft = %dK, time: %2.4f, t1 = %d, t2 = %d, t3 = %d\n", n/1024, best_time / passes, threads[best_t1], threads[best_t2], threads[best_t3]); cufftSafeCall (cufftDestroy (plan)); cutilSafeCall (cudaFree ((char *) g_x)); cutilSafeCall (cudaFree ((char *) g_ttmp)); cutilSafeCall (cudaFree ((char *) g_ttp1)); cutilSafeCall (cudaFree ((char *) g_ct)); cutilSafeCall (cudaFree ((char *) g_datai)); cutilSafeCall (cudaFree ((char *) g_carryi)); cutilSafeCall (cudaFree ((char *) g_err)); cutilSafeCall (cudaEventDestroy (start)); cutilSafeCall (cudaEventDestroy (stop)); char threadfile[32]; sprintf (threadfile, "%s threads.txt", dev.name); FILE *fptr; fptr = fopen(threadfile, "a+"); if(!fptr) printf("Can't open %s threads.txt\n", dev.name); else fprintf(fptr, "%5d %4d %4d %4d %8.4f\n", n / 1024, threads[best_t1], threads[best_t2], threads[best_t3], best_time / passes); } int isprime(unsigned int n) { unsigned int i; if(n<=1) return 0; if(n>2 && n%2==0)return 0; i=3; while(i*i <= n && i < 0x10000) { if(n%i==0)return 0; i+=2; } return 1; } void cufftbench (int cufftbench_s, int cufftbench_e, int passes, int device_number) { //if(cufftbench_s % 2) cufftbench_s++; cudaEvent_t start, stop; float outerTime; int i, j, k; int end = cufftbench_e - cufftbench_s + 1; float best_time; float *total, *max_diff, maxerr = 0.5f; int threads[] = {32, 64, 128, 256, 512, 1024}; int t1 = 3, t2 = 2, t3 = 2; if(end == 1) { threadbench(cufftbench_e, passes, device_number); return; } printf ("CUDA bench, testing reasonable fft sizes %dK to %dK, doing %d passes.\n", cufftbench_s, cufftbench_e, passes); total = (float *) malloc (sizeof (float) * end); max_diff = (float *) malloc (sizeof (float) * end); for(i = 0; i < end; i++) { total[i] = max_diff[i] = 0.0f; } cutilSafeCall (cudaMalloc ((void **) &g_x, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (cudaMemset (g_x, 0, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_ttmp, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (cudaMemset (g_ttmp, 0, sizeof (double) * 1024 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_ct, sizeof (double) * 256 * cufftbench_e)); cutilSafeCall 
(cudaMemset (g_ct, 0, sizeof (double) * 256 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_ttp1, sizeof (double) * 1024 / 32 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_datai, sizeof (int) * 1024 / 32 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_carryi, sizeof (int) * 512 / 32 * cufftbench_e)); cutilSafeCall (cudaMalloc ((void **) &g_err, sizeof (float))); cutilSafeCall (cudaMemset (g_err, 0, sizeof (float))); cutilSafeCall (cudaEventCreate (&start)); cutilSafeCall (cudaEventCreateWithFlags (&stop, cudaEventBlockingSync)); for (j = cufftbench_s; j <= cufftbench_e; j++) { if(isReasonable(j) < 15) { int n = j * 1024; cufftSafeCall (cufftPlan1d (&plan, n / 2, CUFFT_Z2Z, 1)); for(k = 0; k < passes; k++) { cutilSafeCall (cudaEventRecord (start, 0)); for (i = 0; i < 50; i++) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); square <<< n / (4 * threads[t2]), threads[t2] >>> (n, g_x, g_ct); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) g_x, (cufftDoubleComplex *) g_x, CUFFT_INVERSE)); norm1a <<< n / (2 * threads[t1]), threads[t1] >>> (g_x, g_datai, g_xint, g_ttmp, g_carryi, g_err, maxerr, 0); norm2a <<< (n / (2 * threads[t1]) + threads[t3] - 1) / threads[t3], threads[t3] >>> (g_x, g_xint, n, threads[t1], g_datai, g_carryi, g_ttp1, 0); } cutilSafeCall (cudaEventRecord (stop, 0)); cutilSafeCall (cudaEventSynchronize (stop)); cutilSafeCall (cudaEventElapsedTime (&outerTime, start, stop)); i = j - cufftbench_s; outerTime /= 50.0f; total[i] += outerTime; if(outerTime > max_diff[i]) max_diff[i] = outerTime; } cufftSafeCall (cufftDestroy (plan)); printf ("fft size = %dK, ave time = %2.4f msec, max-ave = %0.5f\n", j , total[i] / passes, max_diff[i] - total[i] / passes); fflush(NULL); } } cutilSafeCall (cudaFree ((char *) g_x)); cutilSafeCall (cudaFree ((char *) g_ttmp)); cutilSafeCall (cudaFree ((char *) g_ttp1)); cutilSafeCall (cudaFree ((char *) g_ct)); cutilSafeCall (cudaFree ((char *) g_datai)); cutilSafeCall (cudaFree ((char *) g_carryi)); cutilSafeCall (cudaFree ((char *) g_err)); cutilSafeCall (cudaEventDestroy (start)); cutilSafeCall (cudaEventDestroy (stop)); i = end - 1; best_time = 10000.0f; while(i >= 0) { if(total[i] > 0.0f && total[i] < best_time) best_time = total[i]; else total[i] = 0.0f; i--; } char fftfile[32]; FILE *fptr; sprintf (fftfile, "%s fft.txt", dev.name); fptr = fopen(fftfile, "w"); if(!fptr) { printf("Cannot open %s.\n",fftfile); printf ("Device %s\n", dev.name); printf ("Compatibility %d.%d\n", dev.major, dev.minor); printf ("clockRate (MHz) %d\n", dev.clockRate/1000); printf ("memClockRate (MHz) %d\n", dev.memoryClockRate/1000); printf("\n fft max exp ms/iter\n"); for(i = 0; i < end; i++) { if(total[i] > 0.0f) { int tl = (int) (exp(0.9784876919 * log ((double)cufftbench_s + i)) * 22366.92473079 / 1.01); if(tl % 2 == 0) tl -= 1; while(!isprime(tl)) tl -= 2; printf("%5d %10d %8.4f\n", cufftbench_s + i, tl, total[i] / passes); } } fflush(NULL); } else { fprintf (fptr, "Device %s\n", dev.name); fprintf (fptr, "Compatibility %d.%d\n", dev.major, dev.minor); fprintf (fptr, "clockRate (MHz) %d\n", dev.clockRate/1000); fprintf (fptr, "memClockRate (MHz) %d\n", dev.memoryClockRate/1000); fprintf(fptr, "\n fft max exp ms/iter\n"); for(i = 0; i < end; i++) { if(total[i] > 0.0f) { int tl = (int) (exp(0.9784876919 * log ((double)cufftbench_s + i)) * 22366.92473079 / 1.01); if(tl % 2 == 0) tl -= 1; while(!isprime(tl)) tl -= 2; fprintf(fptr, "%5d %10d %8.4f\n", cufftbench_s + i, 
tl, total[i] / passes); } } fclose(fptr); printf("Optimal fft lengths saved in %s.\nPlease email a copy to [email protected].\n", fftfile); fflush(NULL); } free ((char *) total); free ((char *) max_diff); } void SetQuitting (int sig) { quitting = 1; sig==SIGINT ? printf( "\tSIGINT") : (sig==SIGTERM ? printf( "\tSIGTERM") : printf( "\tUnknown signal")) ; printf( " caught, writing checkpoint.\n"); } #ifndef _MSC_VER #include <termios.h> #include <unistd.h> #include <fcntl.h> int _kbhit (void) { struct termios oldt, newt; int ch; int oldf; tcgetattr (STDIN_FILENO, &oldt); newt = oldt; newt.c_lflag &= ~(ICANON | ECHO); tcsetattr (STDIN_FILENO, TCSANOW, &newt); oldf = fcntl (STDIN_FILENO, F_GETFL, 0); fcntl (STDIN_FILENO, F_SETFL, oldf | O_NONBLOCK); ch = getchar (); tcsetattr (STDIN_FILENO, TCSANOW, &oldt); fcntl (STDIN_FILENO, F_SETFL, oldf); if (ch != EOF) { ungetc (ch, stdin); return 1; } return 0; } #else #include <conio.h> #endif int interact(void); // defined below everything else int get_bit(int location, unsigned *control) { int digit = location / 32; int bit = location % 32; bit = 1 << bit; bit = control[digit] & bit; if(bit) bit /= bit; return(bit); } int round_off_test(int q, int n, int *j, unsigned *control, int last) { int k; float totalerr = 0.0; float terr, avgerr, maxerr = 0.0; float max_err = 0.0, max_err1 = 0.0; int bit; printf("Running careful round off test for 1000 iterations. If average error > 0.25, the test will restart with a longer FFT.\n"); for (k = 0; k < 1000 && k < last; k++) { bit = get_bit(last - k - 1, control); terr = lucas_square (q, n, k, last, &maxerr, 1, bit, 1, k == 999); if(terr > maxerr) maxerr = terr; if(terr > max_err) max_err = terr; if(terr > max_err1) max_err1 = terr; totalerr += terr; reset_err(&maxerr, 0.85); if(terr >= 0.35) { printf ("Iteration = %d < 1000 && err = %5.5f >= 0.35, increasing n from %dK\n", k, terr, n/1024); fftlen++; return 1; } if(k && (k % 100 == 0)) { printf( "Iteration %d, average error = %5.5f, max error = %5.5f\n", k, totalerr / k, max_err); max_err = 0.0; } } avgerr = totalerr/1000.0; if( avgerr > 0.25) { printf("Iteration 1000, average error = %5.5f > 0.25 (max error = %5.5f), increasing FFT length and restarting\n", avgerr, max_err); fftlen++; return 1; } else if( avgerr < 0 ) { fprintf(stderr, "Something's gone terribly wrong! 
Avgerr = %5.5f < 0 !\n", avgerr); exit (2); } else { printf("Iteration 1000, average error = %5.5f <= 0.25 (max error = %5.5f), continuing test.\n", avgerr, max_err1); reset_err(&maxerr, 0.85); } *j += 1000; return 0; } unsigned *get_control(int *j, int lim1, int lim2, int q) { mpz_t result; int p = 2; int limit; int prime_power = 1; unsigned *control = NULL; mpz_init(result); if(lim2 == 0) { mpz_set_ui (result, 2 * q); limit = lim1 / p; while (prime_power <= limit) prime_power *= p; mpz_mul_ui(result, result, prime_power); p = 3; while (p <= lim1) { while(p <= lim1 && !isprime(p)) p += 2; limit = lim1 / p; prime_power = p; while (prime_power <= limit) prime_power *= p; mpz_mul_ui(result, result, prime_power); p += 2; } } else { p = lim1; if(!(lim1 & 1)) p++; mpz_set_ui (result, 1); while (p <= lim2) { while(p <= lim2 && !isprime(p)) p += 2; mpz_mul_ui(result, result, p); printf("prime_power: %d, %d\n", prime_power, p); p += 2; } } *j = mpz_sizeinbase (result, 2); control = (unsigned *) malloc (sizeof (unsigned) * ((*j + 31) / 32)); mpz_export (control, NULL, -1, 4, 0, 0, result); mpz_clear (result); return control; } int get_gcd(unsigned *x_packed, int q, int n, int stage) { mpz_t result, prime, prime1; int end = (q + 31) / 32; int rv = 0; mpz_init2( result, q); mpz_init2( prime, q); mpz_init2( prime1, q); mpz_import (result, end, -1, sizeof(x_packed[0]), 0, 0, x_packed); if(stage == 1) mpz_sub_ui (result, result, 1); mpz_setbit (prime, q); mpz_sub_ui (prime, prime, 1); if (mpz_cmp_ui (result, 0)) { mpz_gcd (prime1, prime, result); if (mpz_cmp_ui (prime1, 1)) { rv = 1; printf( "M%d has a factor: ", q); mpz_out_str (stdout, 10, prime1); if (stage==1) printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,b1,g_e,n/1024, program); // Found in stage 1 else printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); FILE* fp = fopen_and_lock(RESULTSFILE, "a"); fprintf (fp, "M%d has a factor: ", q); mpz_out_str (fp, 10, prime1); if (AID[0] && strncasecmp(AID, "N/A", 3)) { if (stage==1) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,b1,g_e,n/1024, AID, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,g_b2,g_e,n/1024, AID, program); } else { if (stage==1) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,b1,g_e,n/1024, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); } unlock_and_fclose(fp); } } if (rv == 0) { printf( "M%d Stage %d found no factor", q, stage); printf (" (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); if (stage==2) { FILE* fp = fopen_and_lock(RESULTSFILE, "a"); fprintf (fp, "M%d found no factor", q); if (AID[0] && strncasecmp(AID, "N/A", 3)) fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK, aid=%s %s)\n", b1,g_b2,g_e,n/1024, AID, program); else fprintf (fp, " (P-1, B1=%d, B2=%d, e=%d, n=%dK %s)\n", b1,g_b2,g_e,n/1024, program); unlock_and_fclose(fp); } } mpz_clear (result); mpz_clear (prime); mpz_clear (prime1); return rv; } /**************************************************************/ /* Routines to compute optimal and test to optimal P-1 bounds */ /* Stolen from Woltman's Prime95 and adapted to CUDAPm1 */ /**************************************************************/ /* This table gives the values of Dickman's function given an input */ /* between 0.000 and 0.500. These values came from a different program */ /* that did a numerical integration. 
*/ static double savedF[501] = { 0, 0, 0, 0, 0, 0, 3.3513e-215, 5.63754e-208, 4.00865e-201, 1.65407e-194, 4.53598e-188, 8.93587e-182, 1.33115e-175, 1.55557e-169, 1.46609e-163, 1.13896e-157, 7.42296e-152, 3.80812e-146, 1.56963e-140, 5.32886e-135, 1.51923e-129, 3.69424e-124, 7.76066e-119, 1.42371e-113, 2.30187e-108, 3.30619e-103, 4.24793e-098, 4.80671e-093, 4.78516e-088, 4.22768e-083, 3.33979e-078, 2.37455e-073, 1.52822e-068, 8.94846e-064, 4.78909e-059, 4.65696e-057, 4.49802e-055, 4.31695e-053, 4.07311e-051, 3.81596e-049, 3.61043e-047, 1.73046e-045, 8.26375e-044, 3.9325e-042, 1.86471e-040, 8.8102e-039, 4.14402e-037, 1.99497e-035, 1.83001e-034, 1.59023e-033, 1.45505e-032, 1.24603e-031, 1.15674e-030, 9.70832e-030, 9.23876e-029, 4.20763e-028, 4.24611e-027, 1.61371e-026, 6.59556e-026, 3.17069e-025, 1.12205e-024, 4.65874e-024, 2.01267e-023, 6.2941e-023, 3.02604e-022, 7.84622e-022, 2.3526e-021, 6.7049e-021, 1.88634e-020, 4.59378e-020, 1.37233e-019, 4.00682e-019, 8.34209e-019, 2.21612e-018, 4.84252e-018, 1.02457e-017, 2.03289e-017, 4.07704e-017, 1.33778e-016, 2.4263e-016, 4.14981e-016, 7.0383e-016, 1.20511e-015, 3.85644e-015, 6.52861e-015, 1.06563e-014, 1.67897e-014, 2.79916e-014, 4.54319e-014, 9.83296e-014, 1.66278e-013, 2.61858e-013, 4.03872e-013, 5.98967e-013, 1.09674e-012, 1.70553e-012, 2.56573e-012, 3.72723e-012, 6.14029e-012, 9.33636e-012, 1.36469e-011, 1.89881e-011, 2.68391e-011, 4.12016e-011, 5.94394e-011, 8.43746e-011, 1.12903e-010, 1.66987e-010, 2.36959e-010, 3.11726e-010, 4.28713e-010, 5.90781e-010, 7.79892e-010, 1.05264e-009, 1.4016e-009, 1.87506e-009, 2.42521e-009, 3.14508e-009, 4.38605e-009, 5.43307e-009, 6.96737e-009, 8.84136e-009, 1.16286e-008, 1.42343e-008, 1.79697e-008, 2.30867e-008, 2.88832e-008, 3.52583e-008, 4.31032e-008, 5.46444e-008, 6.66625e-008, 8.06132e-008, 1.00085e-007, 1.20952e-007, 1.4816e-007, 1.80608e-007, 2.13125e-007, 2.5324e-007, 3.094e-007, 3.64545e-007, 4.31692e-007, 5.19078e-007, 6.03409e-007, 7.21811e-007, 8.53856e-007, 9.71749e-007, 1.13949e-006, 1.37042e-006, 1.53831e-006, 1.79066e-006, 2.15143e-006, 2.40216e-006, 2.76872e-006, 3.20825e-006, 3.61263e-006, 4.21315e-006, 4.76404e-006, 5.43261e-006, 6.2041e-006, 6.96243e-006, 7.94979e-006, 8.89079e-006, 1.01387e-005, 1.13376e-005, 1.2901e-005, 1.44183e-005, 1.59912e-005, 1.79752e-005, 1.99171e-005, 2.22665e-005, 2.47802e-005, 2.7678e-005, 3.0492e-005, 3.34189e-005, 3.71902e-005, 4.12605e-005, 4.54706e-005, 4.98411e-005, 5.48979e-005, 6.06015e-005, 6.61278e-005, 7.22258e-005, 7.97193e-005, 8.66574e-005, 9.48075e-005, 0.00010321, 0.000112479, 0.000121776, 0.000133344, 0.000144023, 0.000156667, 0.000168318, 0.000183192, 0.000196527, 0.00021395, 0.000228389, 0.000249223, 0.000264372, 0.000289384, 0.000305707, 0.000333992, 0.000353287, 0.000379868, 0.000408274, 0.00043638, 0.000465319, 0.000496504, 0.000530376, 0.000566008, 0.000602621, 0.000642286, 0.000684543, 0.000723853, 0.000772655, 0.000819418, 0.000868533, 0.000920399, 0.000975529, 0.00103188, 0.00109478, 0.00115777, 0.00122087, 0.00128857, 0.00136288, 0.00143557, 0.00151714, 0.00159747, 0.00167572, 0.00176556, 0.00186199, 0.00195063, 0.00205239, 0.00216102, 0.00225698, 0.00236962, 0.00249145, 0.00259636, 0.00272455, 0.00287006, 0.00297545, 0.00312346, 0.0032634, 0.00340298, 0.00355827, 0.00371195, 0.00387288, 0.00404725, 0.00420016, 0.00439746, 0.00456332, 0.00475936, 0.00495702, 0.00514683, 0.00535284, 0.00557904, 0.00578084, 0.00601028, 0.00623082, 0.00647765, 0.00673499, 0.00696553, 0.00722529, 0.00748878, 0.00775537, 0.00803271, 0.00832199, 0.00861612, 
0.00889863, 0.00919876, 0.00953343, 0.00985465, 0.0101993, 0.0105042, 0.0108325, 0.0112019, 0.0115901, 0.0119295, 0.0123009, 0.0127191, 0.0130652, 0.0134855, 0.0139187, 0.0142929, 0.0147541, 0.0151354, 0.0156087, 0.0160572, 0.0165382, 0.0169669, 0.0174693, 0.017946, 0.0184202, 0.0189555, 0.0194336, 0.0200107, 0.0204863, 0.0210242, 0.0216053, 0.0221361, 0.0226858, 0.0232693, 0.0239027, 0.0244779, 0.025081, 0.0257169, 0.0263059, 0.0269213, 0.0275533, 0.0282065, 0.0289028, 0.029567, 0.0302268, 0.0309193, 0.0316619, 0.0323147, 0.0330398, 0.0338124, 0.0345267, 0.0353038, 0.0360947, 0.0368288, 0.0376202, 0.0383784, 0.0391894, 0.0399684, 0.0408148, 0.0416403, 0.042545, 0.0433662, 0.0442498, 0.0451003, 0.046035, 0.0468801, 0.0478059, 0.0487442, 0.0496647, 0.0505752, 0.0515123, 0.0524792, 0.0534474, 0.0544682, 0.0554579, 0.0565024, 0.0574619, 0.0584757, 0.0595123, 0.0605988, 0.0615874, 0.062719, 0.0637876, 0.064883, 0.0659551, 0.0670567, 0.0681256, 0.0692764, 0.0704584, 0.0715399, 0.0727237, 0.0738803, 0.0750377, 0.0762275, 0.0773855, 0.0785934, 0.0797802, 0.0810061, 0.0822205, 0.0834827, 0.084714, 0.0858734, 0.0871999, 0.0884137, 0.0896948, 0.090982, 0.0922797, 0.093635, 0.0948243, 0.0961283, 0.0974718, 0.0988291, 0.100097, 0.101433, 0.102847, 0.104222, 0.105492, 0.106885, 0.10833, 0.109672, 0.111048, 0.112438, 0.113857, 0.115311, 0.11673, 0.118133, 0.119519, 0.12099, 0.122452, 0.123905, 0.125445, 0.126852, 0.128326, 0.129793, 0.131277, 0.132817, 0.134305, 0.135772, 0.137284, 0.138882, 0.140372, 0.14192, 0.143445, 0.14494, 0.146515, 0.148145, 0.149653, 0.151199, 0.152879, 0.154368, 0.155958, 0.157674, 0.159211, 0.160787, 0.16241, 0.164043, 0.165693, 0.167281, 0.168956, 0.170589, 0.172252, 0.173884, 0.175575, 0.177208, 0.178873, 0.180599, 0.18224, 0.183975, 0.185654, 0.187363, 0.189106, 0.190729, 0.19252, 0.194158, 0.195879, 0.197697, 0.199391, 0.201164, 0.202879, 0.204602, 0.206413, 0.20818, 0.209911, 0.211753, 0.213484, 0.215263, 0.21705, 0.218869, 0.220677, 0.222384, 0.224253, 0.226071, 0.227886, 0.229726, 0.231529, 0.233373, 0.235234, 0.237081, 0.238853, 0.240735, 0.242606, 0.244465, 0.246371, 0.248218, 0.250135, 0.251944, 0.253836, 0.255708, 0.257578, 0.259568, 0.261424, 0.263308, 0.265313, 0.26716, 0.269073, 0.271046, 0.272921, 0.274841, 0.276819, 0.278735, 0.280616, 0.282653, 0.284613, 0.286558, 0.288478, 0.290472, 0.292474, 0.294459, 0.296379, 0.298382, 0.300357, 0.302378, 0.30434, 0.306853 }; /* This evaluates Dickman's function for any value. See Knuth vol. 2 */ /* for a description of this function and its use. */ double F (double x) { int i; if (x >= 1.0) return (1.0); if (x >= 0.5) return (1.0 + log (x)); i = (int) (x * 1000.0); return (savedF[i] + (x * 1000.0 - i) * (savedF[i+1] - savedF[i])); } /* Analyze how well P-1 factoring will perform */ void guess_pminus1_bounds ( int guess_exp, /* N in K*B^N+C. Exponent to test. */ int how_far_factored, /* Bit depth of trial factoring */ int tests_saved, /* 1 if doublecheck, 2 if first test */ int vals, int *bound1, int *bound2, double *success_rate) { int guess_B1, guess_B2, /*vals,*/ i; double h, pass1_squarings, pass2_squarings; double logB1, logB2, kk, logkk, temp, logtemp, log2; double prob, gcd_cost, ll_tests, numprimes; struct { int B1; int B2; double prob; double pass1_squarings; double pass2_squarings; } best[2]; for (i=0; i<2; i++) { best[i].B1=0; best[i].B2=0; best[i].prob=0; best[i].pass1_squarings=0; best[i].pass2_squarings=0; } /* Guard against wild tests_saved values. 
Huge values will cause this routine */ /* to run for a very long time. This shouldn't happen as auxiliaryWorkUnitInit */ /* now has the exact same test. */ if (tests_saved > 10) tests_saved = 10; /* Balance P-1 against 1 or 2 LL tests (actually more since we get a */ /* corrupt result reported some of the time). */ ll_tests = (double) tests_saved + 2 * 0.018; /* Precompute the cost of a GCD. We used Excel to come up with the */ /* formula GCD is equivalent to 861 * Ln (p) - 7775 transforms. */ /* Since one squaring equals two transforms we get the formula below. */ /* NOTE: In version 22, the GCD speed has approximately doubled. I've */ /* adjusted the formula accordingly. */ gcd_cost = (430.5 * log ((double) guess_exp) - 3887.5) / 2.0; if (gcd_cost < 50.0) gcd_cost = 50.0; /* Compute how many temporaries we can use given our memory constraints. */ /* Allow 1MB for code and data structures. */ // vals = cvt_mem_to_estimated_gwnums (max_mem (thread_num), k, b, n, c); // if (vals < 1) vals = 1; //vals = 176; /* Find the best B1 */ log2 = log ((double) 2.0); for (guess_B1 = 10000; ; guess_B1 += 5000) { /* Constants */ logB1 = log ((double) guess_B1); /* Compute how many squarings will be required in pass 1 */ pass1_squarings = ceil (1.44 * guess_B1); /* Try a lot of B2 values */ for (guess_B2 = guess_B1; guess_B2 <= guess_B1 * 100; guess_B2 += guess_B1 >> 2) { /* Compute how many squarings will be required in pass 2. In the */ /* low-memory cases, assume choose_pminus1_plan will pick D = 210, E = 1 */ /* If more memory is available assume choose_pminus1_plan will pick */ /* D = 2310, E = 2. This will provide an accurate enough cost for our */ /* purposes even if different D and E values are picked. See */ /* choose_pminus1_plan for a description of the costs of P-1 stage 2. */ /* For cudapm1, we're not set up for e = 1, assume e = 2 in both cases*/ logB2 = log ((double) guess_B2); numprimes = (unsigned long) (guess_B2 / (logB2 - 1.0) - guess_B1 / (logB1 - 1.0)); if (guess_B2 <= guess_B1) { pass2_squarings = 0.0; } else if (vals <= 8) { /* D = 210, E = 1, passes = 48/temps */ unsigned long num_passes; num_passes = (unsigned long) ceil (48.0 / (vals - 3)); pass2_squarings = ceil ((guess_B2 - guess_B1) / 210.0) * num_passes; pass2_squarings += numprimes * 1.1; } else { unsigned long num_passes; double numpairings; num_passes = (unsigned long) ceil (480.0 / (vals - 3)); numpairings = (unsigned long) (numprimes / 2.0 * numprimes / ((guess_B2-guess_B1) * 480.0/2310.0)); pass2_squarings = 2400.0 + num_passes * 90.0; /* setup costs */ pass2_squarings += ceil ((guess_B2-guess_B1) / 4620.0) * 2.0 * num_passes; /*number of base changes per pass * e with e = 2*/ pass2_squarings += numprimes - numpairings; /*these are the sub_mul operations*/ } /* Pass 2 FFT multiplications seem to be at least 20% slower than */ /* the squarings in pass 1. This is probably due to several factors. */ /* These include: better L2 cache usage and no calls to the faster */ /* gwsquare routine. Nov, 2009: On my Macbook Pro, with exponents */ /* around 45M and using 800MB memory, pass2 squarings are 40% slower. */ /* Owftheevil reports that CUDA squarings are only about 2% slower. */ /* New normaliztion kernels benefit stage 1 more than stage 2, back to 9% */ pass2_squarings *= 1.09; // was 1.35 /* What is the "average" value that must be smooth for P-1 to succeed? */ /* Ordinarily this is 1.5 * 2^how_far_factored. However, for Mersenne */ /* numbers the factor must be of the form 2kp+1. 
Consequently, the */ /* value that must be smooth (k) is much smaller. */ kk = 1.5 * pow (2.0, how_far_factored); // if (k == 1.0 && b == 2 && c == -1) kk = kk / 2.0 / guess_exp; logkk = log (kk); /* Set temp to the number that will need B1 smooth if k has an */ /* average-sized factor found in stage 2 */ temp = kk / ((guess_B1 + guess_B2) / 2); logtemp = log (temp); /* Loop over increasing bit lengths for the factor */ prob = 0.0; for (h = how_far_factored; ; ) { double prob1, prob2; /* If temp < 1.0, then there are no factor to find in this bit level */ if (logtemp > 0.0) { /* See how many smooth k's we should find using B1 */ /* Using Dickman's function (see Knuth pg 382-383) we want k^a <= B1 */ prob1 = F (logB1 / logkk); /* See how many smooth k's we should find using B2 */ /* Adjust this slightly to eliminate k's that have two primes > B1 and < B2 */ /* Do this by assuming the largest factor is the average of B1 and B2 */ /* and the remaining cofactor is B1 smooth */ prob2 = prob1 + (F (logB2 / logkk) - prob1) * (F (logB1 / logtemp) / F (logB2 / logtemp)); if (prob2 < 0.0001) break; /* Add this data in to the total chance of finding a factor */ prob += prob2 / (h + 0.5); } /* Move to next bit level */ h += 1.0; logkk += log2; logtemp += log2; } /* See if this is a new best case scenario */ if (guess_B2 == guess_B1 || prob * ll_tests * guess_exp - pass2_squarings > best[0].prob * ll_tests * guess_exp - best[0].pass2_squarings){ best[0].B2 = guess_B2; best[0].prob = prob; best[0].pass2_squarings = pass2_squarings; if (vals < 4) break; continue; } if (prob * ll_tests * guess_exp - pass2_squarings < 0.9 * (best[0].prob * ll_tests * guess_exp - best[0].pass2_squarings)) break; continue; } /* Is this the best B1 thusfar? */ if (guess_B1 == 10000 || best[0].prob * ll_tests * guess_exp - (pass1_squarings + best[0].pass2_squarings) > best[1].prob * ll_tests * guess_exp - (best[1].pass1_squarings + best[1].pass2_squarings)) { best[1].B1 = guess_B1; best[1].B2 = best[0].B2; best[1].prob = best[0].prob; best[1].pass1_squarings = pass1_squarings; best[1].pass2_squarings = best[0].pass2_squarings; continue; } if (best[0].prob * ll_tests * guess_exp - (pass1_squarings + best[0].pass2_squarings) < 0.9 * (best[1].prob * ll_tests * guess_exp - (best[1].pass1_squarings + best[1].pass2_squarings))) break; continue; } /* Return the final best choice */ if (best[1].prob * ll_tests * guess_exp > best[1].pass1_squarings + best[1].pass2_squarings + gcd_cost) { *bound1 = best[1].B1; *bound2 = best[1].B2; // *squarings = (unsigned long) // (best[1].pass1_squarings + // best[1].pass2_squarings + gcd_cost); *success_rate = best[1].prob; } else { *bound1 = 0; *bound2 = 0; // *squarings = 0; *success_rate = 0.0; } } /************************************************************** * * Main Function * **************************************************************/ int stage2_init_param3(int e, int n, int trans, float *err) { int j, i, k = 0, base; mpz_t exponent; long long b[7]; for(i = 0; i <= e/2; i++) { base = 2 * i + 1; b[i] = 1; for(j = 0; j < e / 2; j++) b[i] *= base; b[i] *= b[i]; } for(i = e/2; i > 0; i--) { while (k < i) { j = i; while(j > k) { b[j] = b[j] - b[j-1]; j--; } k++; j = i; while(j > k) { b[j] = b[j] - b[j-1]; j--; } } } mpz_init(exponent); for(i = 0; i <= e / 2; i++) { mpz_set_ui (exponent, b[i]); trans = E_to_the_p(&e_data[2 * i * n], g_x, exponent, n, trans, err); if(i > 0) { cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) &e_data[2 * i * n], (cufftDoubleComplex *) &e_data[2 * i * n], 
CUFFT_INVERSE)); copy_kernel<<<n / (2*threads1), threads1>>>(&e_data[(2 * i - 1) * n], &e_data[2 * i * n]); trans++; } } E_pre_mul(&e_data[e * n], &e_data[e * n], n, 0); E_pre_mul(&e_data[0], &e_data[0], n, 1); trans++; mpz_clear(exponent); return trans; } int next_base1(int k, int e, int n, int trans, float *err) { int j; if(k == 1) return(stage2_init_param3(e, n, trans, err)); if(k > 3) { if(k <= e + 1) { E_mul(&e_data[(k - 3) * n], &e_data[(k - 2) * n], &e_data[(k - 3) * n], n, *err, 0); j = e + 3 - k; trans += 2 * (k - 3); } else { E_half_mul(&e_data[(e-1) * n], &e_data[(e-1) * n], &e_data[e * n], n, *err); j = 1; trans += 2 * ( e - 1); } for(; j < e-1; j++) E_mul(&e_data[(e-j-1) * n], &e_data[(e-j) * n], &e_data[(e-j-1) * n], n, *err, 1); cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) &e_data[1 * n], (cufftDoubleComplex *) &e_data[1 * n], CUFFT_INVERSE)); } E_half_mul(&e_data[0], &e_data[1 * n], &e_data[0], n, *err); E_pre_mul(&e_data[0], &e_data[0], n, 1); trans += 2; return trans; } int stage2_init_param1(int k, int base, int e, int n, int trans, float *err) { int i, j; if(base > 1) { mpz_t exponent; mpz_init(exponent); mpz_ui_pow_ui (exponent, base, e); trans = E_to_the_p(&e_data[0], g_x, exponent, n, trans, err); E_pre_mul(g_x, &e_data[0], n, 1); trans++; mpz_clear(exponent); } if(k < 2 * e) for(j = 1; j <= k; j += 2) { trans = next_base1(j, e, n, trans, err); cutilSafeThreadSync(); } else { mpz_t *exponents; exponents = (mpz_t *) malloc((e + 1) * sizeof(mpz_t)); for(j = 0; j <= e; j++) mpz_init(exponents[j]); for(j = e; j >= 0; j--) mpz_ui_pow_ui (exponents[j], (k - j * 2), e); for(j = 0; j < e; j++) for(i = e; i > j; i--) mpz_sub(exponents[i], exponents[i-1], exponents[i]); for(j = 0; j <= e; j++) trans = E_to_the_p(&e_data[j * n], g_x, exponents[j], n, trans, err); for(j = 0; j <= e; j++) mpz_clear(exponents[j]); E_pre_mul(&e_data[0], &e_data[0], n, 1); E_pre_mul(&e_data[e * n], &e_data[e * n], n, 1); for(j = 1; j < e; j++) cufftSafeCall (cufftExecZ2Z (plan, (cufftDoubleComplex *) &e_data[j * n], (cufftDoubleComplex *) &e_data[j * n], CUFFT_INVERSE)); trans += e + 1; } return trans; } int stage2_init_param2(int num, int cur_rp, int base, int e, int n, uint8 *gaps, int trans, float *err) { int rp = 1, j = 0, i; mpz_t exponent; mpz_init(exponent); while(j < cur_rp) { j++; rp += 2 * gaps[j]; } for(i = 0; i < num; i++) { mpz_ui_pow_ui (exponent, rp, e); trans = E_to_the_p(&rp_data[i * n], g_x, exponent, n, trans, err); E_pre_mul(&rp_data[i * n], &rp_data[i * n], n, 1); trans++; j++; if(rp < base - 1) rp += 2 * gaps[j]; } mpz_clear(exponent); return trans; } int stage2_init_param4(int num, int cur_rp, int base, int e, int n, uint8 *gaps, int trans, float *err) { int rp = 1, j = 0, i, k = 1; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } trans = stage2_init_param1(rp, 1, e, n, trans, err); copy_kernel<<<n / (2*threads1), threads1>>>(&rp_data[0], &e_data[0]); k = rp + 2; for(i = 1; i < num; i++) { j++; rp += 2 * gaps[j]; while(k <= rp) { trans = next_base1(k, e, n, trans, err); cutilSafeThreadSync(); k += 2; } copy_kernel<<<n / (2*threads1), threads1>>>(&rp_data[i * n], &e_data[0]); } return trans; } int rp_init_count1(int k, int base, int e, int n) { int i, j, trans = 0; int numb[6] = {10,38,102,196,346,534}; int numb1[11] = {2,8,18,32,50,72,96,120,144,168,192}; mpz_t exponent; mpz_init(exponent); mpz_ui_pow_ui (exponent, base, e); trans += (int) mpz_sizeinbase (exponent, 2) + (int) mpz_popcount(exponent) - 2; mpz_clear(exponent); if(k < 2 * e) { trans = 2 * trans + 1; 
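/* Note: the mpz_sizeinbase() + mpz_popcount() - 2 term above is the usual cost of
   left-to-right binary exponentiation: (bit length - 1) squarings plus (popcount - 1)
   multiplies.  numb[] and numb1[] appear to be precomputed transform counts for the
   small-k initialization path, indexed by e/2 - 1 and k/2 - 1. */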
trans += numb[e / 2 - 1] + numb1[k/2-1]; return(trans); } else { mpz_t *exponents; exponents = (mpz_t *) malloc((e+1) * sizeof(mpz_t)); for(j = 0; j <= e; j++) mpz_init(exponents[j]); for(j = e; j >= 0; j--) mpz_ui_pow_ui (exponents[j], (k - j * 2), e); for(j = 0; j < e; j++) for(i = e; i > j; i--) mpz_sub(exponents[i], exponents[i-1], exponents[i]); for(j = 0; j <= e; j++) { trans += (int) mpz_sizeinbase (exponents[j], 2) + (int) mpz_popcount(exponents[j]) - 2; } for(j = 0; j <= e; j++) mpz_clear(exponents[j]); return 2 * (trans + e + 2) - 1; } } int rp_init_count1a(int k, int base, int e, int n) { int trans; int numb[6] = {10,38,102,196,346,534}; int numb1[12] = {0,2,8,18,32,50,72,96,120,144,168,192}; trans = (int) (e * log2((double)base) * 3.0 ); if(k < 2 * e) { trans += numb[e/2-1] + numb1[(k+1)/2-1]; } else { if(e == 2) trans += (int) (9.108 * log2((double)k) + 10.7); else if(e == 4) trans += (int) (30.349 * log2((double)k) + 50.5); else if(e == 6) trans += (int) (64.560 * log2((double)k) + 137.6); else if(e == 8) trans += (int) (110.224 * log2((double)k) + 265.2); else if(e == 10) trans += (int) (168.206 * log2((double)k) + 478.6); else trans += (int) (237.888 * log2((double)k) + 731.5); } return trans; } int rp_init_count2(int num, int cur_rp, int e, int n, uint8 *gaps) { int rp = 1, j = 0, i, trans = 0; int numb[6] = {10,38,102,196,346,534}; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } if(cur_rp == 0) trans -= e * e / 2 - 1; cur_rp = rp; if(rp == 1) trans += numb[e/2-1]; else trans = rp_init_count1(rp, 1, e, n); for(i = 1; i < num; i++) { j++; rp += 2 * gaps[j]; } trans += e * (rp - cur_rp); return trans; } int rp_init_count2a(int cur_rp, int e, int n, uint8 *gaps) { int rp = 1, j = 0, trans = 0; int numb[6] = {10,38,102,196,346,534}; while(j < cur_rp) { j++; rp += 2 * gaps[j]; } if(cur_rp == 0) trans -= e * e / 2 - 1; cur_rp = rp; if(rp == 1) trans += numb[e/2-1]; else trans = rp_init_count1a(rp, 1, e, n); return trans; } int stage2(int *x_int, unsigned *x_packed, int q, int n, int nrp, float err) { int j, i = 0, t; int e, d, b2 = g_b2; int rpt = 0, rp; int ks, ke, m = 0, k; int last = 0; uint8 *bprimes = NULL; int prime, prime_pair; uint8 *rp_gaps = NULL; int sprimes[] = {3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 43, 47, 53, 0}; uint8 two_to_i[] = {1, 2, 4, 8, 16, 32, 64, 128}; int count0 = 0, count1 = 0, count2 = 0; mpz_t control; timeval time0, time1; { int best_guess = 0x01111111; int best_d = 0, best_e = 0, best_nrp = 0; int guess; int passes; int su; int nrpe = 0; int start_e = 2, end_e = 12; int start_d = 9240, d_div = 1; if(g_e) { start_e = g_e; end_e = g_e; } if(g_d) { start_d = g_d; d_div = g_d; } for(d = start_d; d > 1;d /= d_div) { if(d >= 2310) { rpt = d / 2310 * 480; i = 4; } else if(d >= 210) { rpt = d / 210 * 48; i = 3; } else if(d >= 30) { rpt = d / 30 * 8; i = 2; } //else if(d >= 6) // { // rpt = d / 6 * 2; // i = 1; //} if(b1 * sprimes[i] * 53 < b2) ks = ((((b1 * 53 + 1) >> 1) + d - 1) / d - 1) * d; else if(b1 * sprimes[i] < b2) ks = ((((b2 / sprimes[i] + 1) >> 1) + d - 1) / d - 1) * d; else ks = ((((b1 + 1) >> 1) + d - 1) / d - 1) * d; ke = ((((b2 + 1) >> 1) + d - 1) / d) * d; ks = ((ks / d) << 1) + 1; ke = (ke / d) << 1; for(e = start_e; e <= end_e; e +=2) { nrpe = nrp - e - 1; if(nrpe <= 0) break; passes = (rpt + nrpe - 1) / nrpe; while(nrpe > 1 && passes == (rpt + nrpe - 2) / (nrpe - 1)) nrpe--; guess = rp_init_count1a(ks, d, e, n) * passes; for(su = 0; su < rpt; su += nrpe)guess += rp_init_count1a((su * d / rpt) | 1, 1, e, n); guess += 2 * e * (d/2 - passes) - 
e * e / 2; double numprimes = (double) ke*d / (log ((double) ke*d) - 1.0) - (double) b1 / (log ((double) b1) - 1.0); double numpairings = numprimes / 2.0 * numprimes / ((double) ((ke - ks)*d) * (double) rpt / d); guess += e * (ke - ks) * passes + (2.2) * (int)(numprimes-numpairings); if(e == 4) guess = (int) guess * 0.95; if(e == 6) guess = (int) guess * 0.90; if(e == 12) guess = (int) guess * 0.85; if(guess < best_guess) { best_guess = guess; best_d = d; best_e = e; best_nrp = nrpe; } } if(d>2310) d -= 2310; else if(d>210) d -= 210; else if(d>=30) d -= 30; //else if(d>=6) d -= 6; } d = best_d; e = best_e; nrp = best_nrp; } if(d == 0) exit(3); int end = (q + 31) / 32; if(x_packed[end + 10] == 0) { x_packed[end + 10] = b2; x_packed[end + 11] = d; x_packed[end + 12] = e; x_packed[end + 13] = nrp; x_packed[end + 14] = 0; // m = number of relative primes already finished x_packed[end + 15] = 0; // k = how far done with currect crop of relative primes x_packed[end + 16] = 0; // t = where to find next relativel prime in the bit array x_packed[end + 17] = 0; // extra initialization transforms from starting in the middle of a pass } else { b1 = x_packed[end + 5]; b2 = x_packed[end + 10]; d = x_packed[end + 11]; e = x_packed[end + 12]; nrp = x_packed[end + 13]; } g_e = e; printf("Using b1 = %d, b2 = %d, d = %d, e = %d, nrp = %d\n",b1, b2,d,e,nrp); if(d % 2310 == 0) { i = 4; rpt = 480 * d / 2310; } else if(d % 210 == 0) { i = 3; rpt = 48 * d / 210; } else if(d % 30 == 0) { i = 2; rpt = 8 * d / 30; } else { i = 1; rpt = 2 * d / 6; } if(b1 * sprimes[i] * 53 < b2) ks = ((((b1 * 53 + 1) >> 1) + d - 1) / d - 1) * d; else if(b1 * sprimes[i] < b2) ks = ((((b2 / sprimes[i] + 1) >> 1) + d - 1) / d - 1) * d; else ks = ((((b1 + 1) >> 1) + d - 1) / d - 1) * d; ke = ((((b2 + 1) >> 1) + d - 1) / d) * d; bprimes = (uint8*) malloc(ke * sizeof(uint8)); if(!bprimes) { printf("failed to allocate bprimes\n"); exit (1); } for (j = 0; j < ke; j++) bprimes[j] = 0; gtpr(2 * ke, bprimes); for(j = 0; j < 10; j++) bprimes[j] = 1; bprimes[0] = bprimes[4] = bprimes[7] = 0; cutilSafeCall (cudaMalloc ((void **) &e_data, sizeof (double) * n * (e + 1))); cutilSafeCall (cudaMalloc ((void **) &rp_data, sizeof (double) * n * nrp)); for( j = (b1 + 1) >> 1; j < ks; j++) { if(bprimes[j] == 1) { m = i; last = j; while(sprimes[m]) { prime = sprimes[m] * j + (sprimes[m] >> 1); m++; if(prime < ks) continue; if(prime > ke) break; prime_pair = prime + d - 1 - ((prime % d) << 1); bprimes[last] = 0; bprimes[prime] = 1; if(bprimes[prime_pair]) break; last = prime; } } } rp_gaps = (uint8*) malloc(rpt * sizeof(uint8)); if(!rp_gaps) { printf("failed to allocate rp_gaps\n"); exit (1); } j = 0; k = 0; for(rp = 1; rp < d; rp += 2) { k++; for (m = 0; m < i; m++) if((rp % sprimes[m]) == 0) break; if(m == i) { rp_gaps[j] = k; j++; k = 0; } } k = ks + (d >> 1); m = k - 1; j = 0; rp = 0; uint8 *tprimes = (uint8*) malloc(rpt / 8 * sizeof(uint8)); int l = 0; while(m < ke) { tprimes[l] = 0; for(i = 0; i < 8; i++) { m += rp_gaps[j]; k -= rp_gaps[j]; if (bprimes[m] || bprimes[k]) { tprimes[l] |= two_to_i[i]; count1++; } else count0++; if (bprimes[m] && bprimes[k]) count2++; j++; if(j == rpt) { j = 0; m += (d >> 1); k = m + 1; } } l++; if(l * 8 == rpt) { for(t = 0; t < (rpt >> 3); t++) bprimes[rp + t] = tprimes[t]; l = 0; rp += rpt >> 3; } } free(tprimes); printf("Zeros: %d, Ones: %d, Pairs: %d\n", count0, count1, count2); mpz_init(control); mpz_import(control, (ke - ks) / d * rpt / sizeof(bprimes[0]) , -1, sizeof(bprimes[0]), 0, 0, bprimes); free(bprimes); 
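/* `control` now carries one bit per (relative prime, D-block): a bit is set when either
   member of a paired prime lands in that residue class, so the stage 2 main loop below
   only issues an E_sub_mul where mpz_tstbit() finds a 1. */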
unpack_bits_int(x_int, x_packed, q, n); balance_digits_int(x_int, q, n); cudaMemcpy (&g_xint[n], x_int, sizeof (int) * n , cudaMemcpyHostToDevice); int fp = 1; int num_tran = 0, temp_tran; int tran_save; int itran_tot; int ptran_tot; int itran_done = 0; int ptran_done = 0; double checkpoint_int, checkpoint_bnd; double time, ptime = 0.0, itime = 0.0; ks = ((ks / d) << 1) + 1; ke = (ke / d) << 1; m = x_packed[end + 14]; k = x_packed[end + 15]; t = x_packed[end + 16]; if(m + k > 0) // some stage 2 has already been done { itran_done = x_packed[end + 18] + x_packed[end + 17]; ptran_done = x_packed[end + 19]; itime = x_packed[end + 20]; ptime = x_packed[end + 21]; } ptran_tot = (ke - ks - 1) * e * ((rpt + nrp - 1) / nrp) + count1 * 2; int passes; passes = (rpt + nrp - 1) / nrp; itran_tot = rp_init_count1(ks, d, e, n) * passes + x_packed[end + 17]; int su = 0; while (su < rpt) { if(rpt - su > nrp) { itran_tot += rp_init_count2(nrp, su, e, n, rp_gaps); } else { itran_tot += rp_init_count2(rpt - su, su, e, n, rp_gaps); } su += nrp; } if (k == 0) k = ks; if(nrp > rpt - m) nrp = rpt - m; gettimeofday (&time0, NULL); do { printf("Processing %d - %d of %d relative primes.\n", m + 1, m + nrp, rpt); printf("Inititalizing pass... "); apply_weights <<<n / (2 * threads1), threads1>>> (g_x, &g_xint[0], g_ttmp); E_pre_mul(g_x, g_x, n, 1); num_tran = stage2_init_param4(nrp, m, d, e, n, rp_gaps, num_tran, &err); temp_tran = num_tran; num_tran = stage2_init_param1(k, d, e, n, num_tran, &err); apply_weights <<<n / (2 * threads1), threads1>>> (g_x, &g_xint[n], g_ttmp); temp_tran = num_tran - temp_tran; itran_done += num_tran; if((m > 0 || k > ks) && fp) { x_packed[end + 17] += num_tran; itran_tot += num_tran; } fp = 0; cutilSafeCall (cudaMemcpy (&err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); gettimeofday (&time1, NULL); time = 1000000.0 * (double)(time1.tv_sec - time0.tv_sec) + time1.tv_usec - time0.tv_usec; itime += time / 1000000.0; if(!quitting) { printf("done. 
transforms: %d, err = %0.5f, (%0.2f real, %0.4f ms/tran, ETA ", num_tran, err, time / 1000000.0, time / (float) (num_tran * 1000)); if(m == 0 && k == ks) printf("NA"); else print_time_from_seconds((int) (itime * ((double) itran_tot/itran_done - 1) + ptime * ((double) ptran_tot / ptran_done - 1))); } printf(")\n"); time0.tv_sec = time1.tv_sec; time0.tv_usec = time1.tv_usec; num_tran = 0; tran_save = 0; checkpoint_int = (ke - ks) / 2 * e + count1 * nrp / (double) rpt; int chkp_per_pass; chkp_per_pass = RINT_x86(checkpoint_int / checkpoint_iter); if(chkp_per_pass == 0) chkp_per_pass = 1; int next_checkpoint = ke - 1; checkpoint_int = (ke - ks + 1) / (double) chkp_per_pass; checkpoint_bnd = ks - 2.0; while((int) checkpoint_bnd < k) checkpoint_bnd += checkpoint_int; next_checkpoint = RINT_x86(checkpoint_bnd); next_checkpoint |= 1; for( ; k < ke && !quitting; k += 2) { int t_last = -1; { i = nrp - 1; while(i && !mpz_tstbit (control, t + i)) i--; if(i) t_last = t + i; } for(j = 0; j < nrp; j++) { if(mpz_tstbit (control, t)) { E_sub_mul(g_x, g_x, &e_data[0], &rp_data[j * n], n, err, t == t_last); num_tran += 2; if(num_tran % 200 == 0) { cutilSafeCall (cudaMemcpy (&err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(err > 0.4) quitting = 2; } else if(polite_f && num_tran % (2 * polite) == 0) cutilSafeThreadSync(); } t++; } _kbhit(); t += rpt - nrp; if(!quitting) { if(k < ke - 1) num_tran = next_base1(k, e, n, num_tran, &err); if(num_tran % 200 < 2 * e) { cutilSafeCall (cudaMemcpy (&err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(err > 0.4) quitting = 2; } else if(polite_f && num_tran % (2 * polite) < 2 * e) cutilSafeThreadSync(); } if(k == next_checkpoint || quitting == 1) { checkpoint_bnd += checkpoint_int; next_checkpoint = RINT_x86(checkpoint_bnd); next_checkpoint |= 1; if(quitting == 1) cutilSafeCall (cudaMemcpy (&err, g_err, sizeof (float), cudaMemcpyDeviceToHost)); if(err <= 0.4f) { cutilSafeCall (cudaMemcpy (x_int, &g_xint[n], sizeof (int) * n, cudaMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); pack_bits_int(x_int, x_packed, q, n); x_packed[end + 13] = nrp; if(k < ke - 1) { x_packed[end + 14] = m; x_packed[end + 15] = k + 2; x_packed[end + 16] = t; } else { x_packed[end + 14] = m + nrp; x_packed[end + 15] = ks; x_packed[end + 16] = m + nrp; } gettimeofday (&time1, NULL); time = 1000000.0 * (double)(time1.tv_sec - time0.tv_sec) + time1.tv_usec - time0.tv_usec; ptime += time / 1000000.0; x_packed[end + 18] = itran_done; x_packed[end + 19] = ptran_done + num_tran; x_packed[end + 20] = itime; x_packed[end + 21] = ptime; write_st2_checkpoint(x_packed, q); printf ("Transforms: %5d ", num_tran - tran_save); printbits_int (x_int, q, n, 0, 0, NULL, 0); printf (" err = %5.5f (", err); print_time_from_seconds ((int) time1.tv_sec - time0.tv_sec); printf (" real, %4.4f ms/tran, ETA ", time / 1000.0 / (num_tran - tran_save)); print_time_from_seconds((int) itime * ((double) itran_tot/itran_done - 1) + ptime * ((double) ptran_tot / (ptran_done + num_tran) - 1)); printf(")\n"); fflush(stdout); tran_save = num_tran; time0.tv_sec = time1.tv_sec; time0.tv_usec = time1.tv_usec; reset_err(&err, 0.85f); } } } k = ks; m += nrp; t = m; if(rpt - m < nrp) nrp = rpt - m; ptran_done += num_tran; num_tran = 0; printf("\n"); } while(m < rpt && !quitting); if(quitting < 2) { if(!quitting) printf("Stage 2 complete, %d transforms, estimated total time = ", ptran_done + itran_done); else printf("Quitting, estimated time spent = "); print_time_from_seconds((int) itime + ptime); printf("\n"); } 
else if (quitting == 2) printf ("err = %5.5g >= 0.40, quitting.\n", err); free(rp_gaps); cutilSafeCall (cudaFree ((char *) e_data)); cutilSafeCall (cudaFree ((char *) rp_data)); mpz_clear (control); return 0; } int check_pm1 (int q, char *expectedResidue) { int n, j, last = 0; int error_flag, checkpoint_flag; int *x_int = NULL; unsigned *x_packed = NULL; float maxerr = 0.0f, terr; int restarting = 0; timeval time0, time1; int total_time = 0, start_time; int j_resume = 0; int bit; unsigned *control = NULL; int stage = 0, st1_factor = 0; size_t global_mem, free_mem, use_mem; int nrp = g_nrp; signal (SIGTERM, SetQuitting); signal (SIGINT, SetQuitting); cudaMemGetInfo(&free_mem, &global_mem); #ifdef _MSC_VER printf("CUDA reports %IuM of %IuM GPU memory free.\n",free_mem/1024/1024, global_mem/1024/1024); #else printf("CUDA reports %zuM of %zuM GPU memory free.\n",free_mem/1024/1024, global_mem/1024/1024); #endif do { /* while (restarting) */ maxerr = 0.0; if(stage == 0) { if(!x_packed) x_packed = read_checkpoint_packed(q); x_int = init_lucas_packed_int (x_packed, q, &n, &j, &stage, &total_time); } if(!x_int) exit (2); if(stage == 2) { if(read_st2_checkpoint(q, x_packed)) { printf("Stage 2 checkpoint found.\n"); int end = (q + 31) / 32; b1 = x_packed[end + 5]; } else printf("No stage 2 checkpoint.\n"); } g_d = g_d_commandline; if(g_nrp == 0) nrp = ((free_mem - (size_t) unused_mem * 1024 * 1024)/ n / 8 - 7); #ifdef _MSC_VER if (nrp > (4096/sizeof(double))*1024*1024/n) nrp = (4096/sizeof(double))*1024*1024/n; // Max single allocation of 4 GB on Windows? #endif if(nrp < 4) nrp = 4; use_mem = (size_t) (8 * (nrp + 7)* (size_t) n); #ifdef _MSC_VER printf("Using up to %IuM GPU memory.\n",use_mem/1024/1024); #else printf("Using up to %zuM GPU memory.\n",use_mem/1024/1024); #endif if (free_mem < use_mem) printf("WARNING: There may not be enough GPU memory for stage 2!\n"); double successrate = 0.0; if ((g_b1_commandline == 0) || (g_b2_commandline == 0)) { guess_pminus1_bounds(q, tfdepth, llsaved, nrp, &b1, &g_b2, &successrate); } if (g_b1_commandline > 0) b1 = g_b1_commandline; if (g_b2_commandline > 0) g_b2 = g_b2_commandline; if ((g_b1_commandline == 0) && (g_b2_commandline == 0)) printf("Selected B1=%d, B2=%d, %0.3g%% chance of finding a factor\n",b1,g_b2,successrate*100); if(x_packed[(q + 31)/32 + 5] == 0 || restarting) x_packed[(q + 31)/32 + 5] = b1; else { b1 = x_packed[(q + 31)/32 + 5]; printf("Using B1 = %d from savefile.\n", b1); fflush(stdout); } if (g_b2 > 1000000000) printf("WARNING: Expected failure with B2 > 1000000000!\n"); //max B2 supported? 
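/* By this point b1 and g_b2 are fixed: taken from the command line when given, from the
   savefile when resuming, or chosen by guess_pminus1_bounds() otherwise. */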
fflush(stdout); if(stage == 1) { if(control) free(control); control = get_control(&last, b1, 0, q); } gettimeofday (&time0, NULL); start_time = time0.tv_sec; restarting = 0; if(j == 1) { printf ("Starting stage 1 P-1, M%d, B1 = %d, B2 = %d, fft length = %dK\n", q, b1, g_b2, n/1024); printf ("Doing %d iterations\n", last); //restarting = round_off_test(q, n, &j, control, last); //if(restarting) stage = 0; } else { if(stage == 1) { printf ("Continuing stage 1 from a partial result of M%d fft length = %dK, iteration = %d\n", q, n/1024, j); j_resume = j % checkpoint_iter - 1; } else { printf ("Continuing stage 2 from a partial result of M%d fft length = %dK\n", q, n/1024); } } fflush (stdout); for (; !restarting && j <= last; j++) // Main LL loop { if ((j % 100) == 0) error_flag = 1; else error_flag = 0; if ((j % checkpoint_iter == 0) || j == last) checkpoint_flag = 1; else checkpoint_flag = error_flag; bit = get_bit(last - j, control); terr = lucas_square (q, n, j, last, &maxerr, error_flag, bit, stage, checkpoint_flag); if(quitting == 1 && !checkpoint_flag) { j++; bit = get_bit(last - j, control); terr = lucas_square (q, n, j, last, &maxerr, 1, bit, stage, 1); } if (error_flag || quitting == 1) { if (terr >= 0.40) { printf ("Iteration = %d, err = %5.5g >= 0.40, quitting.\n", j, terr); quitting = 2; } } if ((j % checkpoint_iter) == 0 || quitting) { if(quitting < 2) { cutilSafeCall (cudaMemcpy (x_int, g_xint, sizeof (int) * n, cudaMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); gettimeofday (&time1, NULL); total_time += (time1.tv_sec - start_time); start_time = time1.tv_sec; set_checkpoint_data(x_packed, q, n, j + 1, stage, total_time); pack_bits_int(x_int, x_packed, q, n); write_checkpoint_packed (x_packed, q); } if(quitting == 0) { printf ("Iteration %d ", j); printbits_int (x_int, q, n, 0, 0, NULL, 0); long long diff = time1.tv_sec - time0.tv_sec; long long diff1 = 1000000 * diff + time1.tv_usec - time0.tv_usec; long long diff2 = (last - j) * diff1 / ((checkpoint_iter - j_resume) * 1e6); gettimeofday (&time0, NULL); printf (" err = %5.5f (", maxerr); print_time_from_seconds ((int) diff); printf (" real, %4.4f ms/iter, ETA ", diff1 / 1000.0 / (checkpoint_iter - j_resume)); print_time_from_seconds ((int) diff2); printf (")\n"); fflush (stdout); if(j_resume) j_resume = 0; reset_err(&maxerr, 0.85); // Instead of tracking maxerr over whole run, reset it at each checkpoint. 
} else { printf("Estimated time spent so far: "); print_time_from_seconds(total_time); printf("\n\n"); j = last + 1; } } if ( k_f && !quitting && (!(j & 15)) && _kbhit()) interact(); // abstracted to clean up check() fflush (stdout); } if (!restarting && !quitting) { // done with stage 1 if(stage == 1) { free ((char *) control); gettimeofday (&time1, NULL); cutilSafeCall (cudaMemcpy (x_int, g_xint, sizeof (int) * n, cudaMemcpyDeviceToHost)); standardize_digits_int(x_int, q, n, 0, n); if(g_eb1 > b1) stage = 3; else if(g_b2 > b1) stage = 2; set_checkpoint_data(x_packed, q, n, j + 1, stage, total_time); pack_bits_int(x_int, x_packed, q, n); write_checkpoint_packed (x_packed, q); printbits_int (x_int, q, n, 0, NULL , 0, 1); total_time += (time1.tv_sec - start_time); printf ("\nStage 1 complete, estimated total time = "); print_time_from_seconds(total_time); fflush (stdout); printf("\nStarting stage 1 gcd.\n"); st1_factor = get_gcd(/*x,*/ x_packed, q, n, 1); } if(!st1_factor) { if (stage == 3) { printf("Here's where we put the b1 extension calls\n"); stage = 2; } if(stage == 2) { printf("Starting stage 2.\n"); stage2(x_int, x_packed, q, n, nrp, maxerr); if(!quitting) { printf("Starting stage 2 gcd.\n"); get_gcd(x_packed, q, n, 2); rm_checkpoint(q, keep_s1); } } } printf("\n"); } close_lucas (x_int); } while (restarting); free ((char *) x_packed); return (0); } void parse_args(int argc, char *argv[], int* q, int* device_numer, int* cufftbench_s, int* cufftbench_e, int* cufftbench_d); /* The rest of the opts are global */ int main (int argc, char *argv[]) { printf("%s\n",program); quitting = 0; #define THREADS_DFLT 256 #define CHECKPOINT_ITER_DFLT 10000 #define SAVE_FOLDER_DFLT "savefiles" #define S_F_DFLT 0 #define T_F_DFLT 0 #define K_F_DFLT 0 #define D_F_DFLT 0 #define POLITE_DFLT 1 #define UNMEM_DFLT 100; #define WORKFILE_DFLT "worktodo.txt" #define RESULTSFILE_DFLT "results.txt" /* "Production" opts to be read in from command line or ini file */ int q = -1; int device_number = -1, f_f = 0; checkpoint_iter = -1; threads1 = -1; fftlen = -1; unused_mem = -1; s_f = t_f = d_f = k_f = -1; polite_f = polite = -1; AID[0] = input_filename[0] = RESULTSFILE[0] = 0; /* First character is null terminator */ char fft_str[132] = "\0"; /* Non-"production" opts */ r_f = 0; int cufftbench_s, cufftbench_e, cufftbench_d; cufftbench_s = cufftbench_e = cufftbench_d = 0; parse_args(argc, argv, &q, &device_number, &cufftbench_s, &cufftbench_e, &cufftbench_d); /* The rest of the args are globals */ if (file_exists(INIFILE)) { if( checkpoint_iter < 1 && !IniGetInt(INIFILE, "CheckpointIterations", &checkpoint_iter, CHECKPOINT_ITER_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option CheckpointIterations; using default: %d\n", CHECKPOINT_ITER_DFLT)*/; if( threads1 < 1 && !IniGetInt(INIFILE, "Threads", &threads1, THREADS_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option Threads; using default: %d\n", THREADS_DFLT); if( s_f < 0 && !IniGetInt(INIFILE, "SaveAllCheckpoints", &s_f, S_F_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option SaveAllCheckpoints; using default: off\n")*/; if( s_f > 0 && !IniGetStr(INIFILE, "SaveFolder", folder, SAVE_FOLDER_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option SaveFolder; using default: \"%s\"\n", SAVE_FOLDER_DFLT)*/; if( t_f < 0 && !IniGetInt(INIFILE, "CheckRoundoffAllIterations", &t_f, 0) ) fprintf(stderr, "Warning: Couldn't parse ini file option CheckRoundoffAllIterations; using default: off\n"); if(!IniGetInt(INIFILE, 
"KeepStage1SaveFile", &keep_s1, 0) ) keep_s1 = 0; if( polite < 0 && !IniGetInt(INIFILE, "Polite", &polite, POLITE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option Polite; using default: %d\n", POLITE_DFLT); if( k_f < 0 && !IniGetInt(INIFILE, "Interactive", &k_f, 0) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option Interactive; using default: off\n")*/; if( device_number < 0 && !IniGetInt(INIFILE, "DeviceNumber", &device_number, 0) ) fprintf(stderr, "Warning: Couldn't parse ini file option DeviceNumber; using default: 0\n"); if( d_f < 0 && !IniGetInt(INIFILE, "PrintDeviceInfo", &d_f, D_F_DFLT) ) /*fprintf(stderr, "Warning: Couldn't parse ini file option PrintDeviceInfo; using default: off\n")*/; if( !input_filename[0] && !IniGetStr(INIFILE, "WorkFile", input_filename, WORKFILE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option WorkFile; using default \"%s\"\n", WORKFILE_DFLT); /* I've readded the warnings about worktodo and results due to the multiple-instances-in-one-dir feature. */ if( !RESULTSFILE[0] && !IniGetStr(INIFILE, "ResultsFile", RESULTSFILE, RESULTSFILE_DFLT) ) fprintf(stderr, "Warning: Couldn't parse ini file option ResultsFile; using default \"%s\"\n", RESULTSFILE_DFLT); if( fftlen < 0 && !IniGetStr(INIFILE, "FFTLength", fft_str, "\0") ) /*fprintf(stderr, "Warning: Couldn't parse ini file option FFTLength; using autoselect.\n")*/; if( unused_mem < 0 && !IniGetInt(INIFILE, "UnusedMem", &unused_mem, 100) ) printf("Warning: Couldn't parse ini file option UnusedMem; using default.\n"); } else // no ini file { fprintf(stderr, "Warning: Couldn't find .ini file. Using defaults for non-specified options.\n"); if( checkpoint_iter < 1 ) checkpoint_iter = CHECKPOINT_ITER_DFLT; if( threads1 < 1 ) threads1 = THREADS_DFLT; if( fftlen < 0 ) fftlen = 0; if( s_f < 0 ) s_f = S_F_DFLT; if( t_f < 0 ) t_f = T_F_DFLT; if( k_f < 0 ) k_f = K_F_DFLT; if( device_number < 0 ) device_number = 0; if( d_f < 0 ) d_f = D_F_DFLT; if( polite < 0 ) polite = POLITE_DFLT; if( unused_mem < 0 ) unused_mem = UNMEM_DFLT; if( !input_filename[0] ) sprintf(input_filename, WORKFILE_DFLT); if( !RESULTSFILE[0] ) sprintf(RESULTSFILE, RESULTSFILE_DFLT); } if( fftlen < 0 ) { // possible if -f not on command line fftlen = fft_from_str(fft_str); } if (polite == 0) { polite_f = 0; polite = 1; } else { polite_f = 1; } if (threads1 != 32 && threads1 != 64 && threads1 != 128 && threads1 != 256 && threads1 != 512 && threads1 != 1024) { fprintf(stderr, "Error: thread count is invalid.\n"); fprintf(stderr, "Threads must be 2^k, 5 <= k <= 10.\n\n"); exit(2); } f_f = fftlen; // if the user has given an override... 
then note this length must be kept between tests init_device (device_number); fft_count = init_ffts(); if (cufftbench_d) cufftbench (cufftbench_s, cufftbench_e, cufftbench_d, device_number); else { if (s_f) { #ifndef _MSC_VER mode_t mode = S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH; if (mkdir (folder, mode) != 0) fprintf (stderr, "mkdir: cannot create directory `%s': File exists\n", folder); #else if (_mkdir (folder) != 0) fprintf (stderr, "mkdir: cannot create directory `%s': File exists\n", folder); #endif } if (q <= 0) { int error; #ifdef EBUG printf("Processed INI file and console arguments correctly; about to call get_next_assignment().\n"); #endif do { // while(!quitting) fftlen = f_f; // fftlen and AID change between tests, so be sure to reset them AID[0] = 0; error = get_next_assignment(input_filename, &q, &fftlen, &tfdepth, &llsaved, &AID); /* Guaranteed to write to fftlen ONLY if specified on workfile line, so that if unspecified, the pre-set default is kept. */ if( error > 0) exit (2); // get_next_assignment prints warning message #ifdef EBUG printf("Gotten assignment, about to call check().\n"); #endif check_pm1 (q, 0); if(!quitting) // Only clear assignment if not killed by user, i.e. test finished { error = clear_assignment(input_filename, q); if(error) exit (2); // prints its own warnings } } while(!quitting); } else // Exponent passed in as argument { if (!valid_assignment(q, fftlen)) {printf("\n");} //! v_a prints warning else { //int trft = 0; //while(!trft) { check_pm1 (q, 0); //q += 2; //while(!isprime(q)) q += 2; } } } } // end if(-r) else if(-cufft) else(workfile) } // end main() void parse_args(int argc, char *argv[], int* q, int* device_number, int* cufftbench_s, int* cufftbench_e, int* cufftbench_d) { while (argc > 1) { if (strcmp (argv[1], "-t") == 0) { t_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-h") == 0) { fprintf (stderr, "$ CUDAPm1 -h|-v\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] [-i inifile] [-threads 32|64|128|256|512|1024] [-c checkpoint_iteration] [-f fft_length] [-s folder] [-t] [-polite iteration] [-k] exponent|input_filename\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] [-i inifile] [-threads 32|64|128|256|512|1024] [-polite iteration] -r\n\n"); fprintf (stderr, "$ CUDAPm1 [-d device_number] [-info] -cufftbench start end distance\n\n"); fprintf (stderr, " -h print this help message\n"); fprintf (stderr, " -v print version number\n"); fprintf (stderr, " -info print device information\n"); fprintf (stderr, " -i set .ini file name (default = \"CUDAPm1.ini\")\n"); fprintf (stderr, " -threads set threads number (default = 256)\n"); fprintf (stderr, " -f set fft length (if round off error then exit)\n"); fprintf (stderr, " -s save all checkpoint files\n"); fprintf (stderr, " -t check round off error all iterations\n"); fprintf (stderr, " -polite GPU is polite every n iterations (default -polite 1) (-polite 0 = GPU aggressive)\n"); fprintf (stderr, " -cufftbench exec CUFFT benchmark (Ex. 
$ ./CUDAPm1 -d 1 -cufftbench 1179648 6291456 32768 )\n"); fprintf (stderr, " -r exec residue test.\n"); fprintf (stderr, " -k enable keys (p change -polite, t disable -t, s change -s)\n\n"); fprintf (stderr, " -b2 set b2\n\n"); fprintf (stderr, " -d2 Brent-Suyama coefficient (multiple of 30, 210, or 2310) \n\n"); fprintf (stderr, " -e2 Brent-Suyama exponent (2-12) \n\n"); //fprintf (stderr, // Now an internal parameter // " -nrp2 Relative primes per pass (divisor of 8, 48, or 480)\n\n"); exit (2); } else if (strcmp (argv[1], "-v") == 0) { printf("%s\n\n", program); exit (2); } else if (strcmp (argv[1], "-polite") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -polite option\n\n"); exit (2); } polite = atoi (argv[2]); if (polite == 0) { polite_f = 0; polite = 1; } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-r") == 0) { r_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-k") == 0) { k_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-d") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -d option\n\n"); exit (2); } *device_number = atoi (argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-i") == 0) { if(argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -i option\n\n"); exit (2); } sprintf (INIFILE, "%s", argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-info") == 0) { d_f = 1; argv++; argc--; } else if (strcmp (argv[1], "-cufftbench") == 0) { if (argc < 5 || argv[2][0] == '-' || argv[3][0] == '-' || argv[4][0] == '-') { fprintf (stderr, "can't parse -cufftbench option\n\n"); exit (2); } *cufftbench_s = atoi (argv[2]); *cufftbench_e = atoi (argv[3]); *cufftbench_d = atoi (argv[4]); argv += 4; argc -= 4; } else if (strcmp (argv[1], "-threads") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -threads option\n\n"); exit (2); } threads1 = atoi (argv[2]); if (threads1 != 32 && threads1 != 64 && threads1 != 128 && threads1 != 256 && threads1 != 512 && threads1 != 1024) { fprintf(stderr, "Error: thread count is invalid.\n"); fprintf(stderr, "Threads must be 2^k, 5 <= k <= 10.\n\n"); exit (2); } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-c") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -c option\n\n"); exit (2); } checkpoint_iter = atoi (argv[2]); if (checkpoint_iter == 0) { fprintf (stderr, "can't parse -c option\n\n"); exit (2); } argv += 2; argc -= 2; } else if (strcmp (argv[1], "-f") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -f option\n\n"); exit (2); } fftlen = fft_from_str(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-b1") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -b1 option\n\n"); exit (2); } g_b1_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-e2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -e2 option\n\n"); exit (2); } g_e = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-d2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -d2 option\n\n"); exit (2); } g_d_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-b2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -b2 option\n\n"); exit (2); } g_b2_commandline = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-nrp2") == 0) { if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -nrp option\n\n"); exit (2); 
} g_nrp = atoi(argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-s") == 0) { s_f = 1; if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -s option\n\n"); exit (2); } sprintf (folder, "%s", argv[2]); argv += 2; argc -= 2; } else if (strcmp (argv[1], "-eb1") == 0) { s_f = 1; if (argc < 3 || argv[2][0] == '-') { fprintf (stderr, "can't parse -eb1 option\n\n"); exit (2); } g_eb1 = atoi(argv[2]); argv += 2; argc -= 2; } else { if (*q != -1 || strcmp (input_filename, "") != 0 ) { fprintf (stderr, "can't parse options\n\n"); exit (2); } int derp = atoi (argv[1]); if (derp == 0) { sprintf (input_filename, "%s", argv[1]); } else { *q = derp; *q |= 1; while(!isprime(*q)) *q += 2; } argv++; argc--; } } if (g_d_commandline%30 != 0) { printf("-d2 must be a multiple of 30, 210, or 2310.\n"); exit(3); } if ((g_e%2 != 0) || (g_e < 0) || (g_e > 12)) { printf("-e2 must be 2, 4, 6, 8, 10, or 12.\n"); exit(3); } } int interact(void) { int c = getchar (); if (c == 'p') if (polite_f) { polite_f = 0; printf (" -polite 0\n"); } else { polite_f = 1; printf (" -polite %d\n", polite); } else if (c == 't') { t_f = 0; printf (" disabling -t\n"); } else if (c == 's') if (s_f == 1) { s_f = 2; printf (" disabling -s\n"); } else if (s_f == 2) { s_f = 1; printf (" enabling -s\n"); } if (c == 'F') { printf(" -- Increasing fft length.\n"); fftlen++; return 1; } if (c == 'f') { printf(" -- Decreasing fft length.\n"); fftlen--; return 1; } if (c == 'k') { printf(" -- fft length reset cancelled.\n"); return 2; } fflush (stdin); return 0; }
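The guess_pminus1_bounds() code earlier in this file estimates, via a Dickman-style function F(), how likely the unknown factor's cofactor k is to be smooth for candidate B1/B2 bounds, and weighs that chance against the squaring cost. The snippet below is a rough standalone illustration of that kind of estimate only: smooth_prob() is a hypothetical name, and the classic approximation rho(u) ~ u^-u is far cruder than the program's own F().

#include <math.h>
#include <stdio.h>

/* Hypothetical sketch: probability that a random integer of size k is B1-smooth,
   using the crude Dickman approximation rho(u) ~ u^-u with u = ln(k)/ln(B1). */
static double smooth_prob (double k, double B1)
{
  double u = log (k) / log (B1);
  if (u <= 1.0) return 1.0;  /* every prime factor is already <= B1 */
  return pow (u, -u);
}

int main (void)
{
  /* e.g. a ~100-bit cofactor with B1 = 1e6 gives a chance on the order of 3e-4 */
  printf ("P(smooth) ~ %g\n", smooth_prob (pow (2.0, 100.0), 1e6));
  return 0;
}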
009930b5fc33155d30efd849794329ae2cb66134.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_3.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes); check_cuda_error( hipPeekAtLastError() ); hipDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( hipPeekAtLastError() ); check_cuda_error(hipFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real 
sv_sst[]={-86.5155355988227,0.00130464112117661,0.778394151451177,0.777964840660150,0.000176215905240282,0.484540692918688,0.00295140339852797,0.999998329361488,1.95069132761285e-08,1.90441683873891e-05,0.999773668881183,1.00752137726522,0.999999011953414,3.50467870287278e-05,0.552698876142541,10.5362185646449,139.026156778373}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters 
real parameters []={14.4104027737061,0.000233793217484143,0.000134103897714886,0.000392835183485183,0.247705661554596,0.185613722097597,0.139724865300249,4.46736215747095,0.0148738820812480,1.45357322782103,1098.78347998281,0.000333024201998587,0.167028470911446,0.0196544238733398,0.00282952210522065,1.25600394920021e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); 
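/* The CaSR and Cai updates below use instantaneous (rapid) buffering, as in the original
   ten Tusscher 2004 formulation: the net flux is applied to total calcium and the new
   free concentration is recovered as the positive root of a quadratic. */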
CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + 
dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
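The CUDA original of this model follows under the next filename. Apart from the hipify banner, the hip_runtime include, and the hip*-prefixed runtime calls, the main structural change hipify makes is the kernel-launch form; both lines below are taken from the two files (GRID, BLOCK_SIZE, sv and num_volumes as defined there):

// Triple-chevron launch as written in the .cu file:
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
// Portable form emitted by hipify in the .hip file; the two extra 0 arguments are the
// dynamic shared-memory size in bytes and the stream:
hipLaunchKernelGGL(kernel_set_model_inital_conditions, dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);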
009930b5fc33155d30efd849794329ae2cb66134.cu
#include <stddef.h> #include <stdint.h> #include "model_gpu_utils.h" #include "ten_tusscher_2004_epi_S2_3.h" extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) { print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n"); // execution configuration const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t size = num_volumes*sizeof(real); check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ)); check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t))); kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes); check_cuda_error( cudaPeekAtLastError() ); cudaDeviceSynchronize(); return pitch_h; } extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) { // execution configuration const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE; size_t stim_currents_size = sizeof(real)*num_cells_to_solve; size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve; real *stims_currents_device; check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size)); check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice)); //the array cells to solve is passed when we are using and adapative mesh uint32_t *cells_to_solve_device = NULL; if(cells_to_solve != NULL) { check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size)); check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice)); } solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps); check_cuda_error( cudaPeekAtLastError() ); check_cuda_error(cudaFree(stims_currents_device)); if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device)); } __global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes) { // Thread ID int threadID = blockDim.x * blockIdx.x + threadIdx.x; if(threadID < num_volumes) { /* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt *((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M *((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H *((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J *((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1 *((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2 *((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs *((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S *((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R *((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D *((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F *((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa *((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G *((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai *((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR *((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai *((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.5155355988227,0.00130464112117661,0.778394151451177,0.777964840660150,0.000176215905240282,0.484540692918688,0.00295140339852797,0.999998329361488,1.95069132761285e-08,1.90441683873891e-05,0.999773668881183,1.00752137726522,0.999999011953414,3.50467870287278e-05,0.552698876142541,10.5362185646449,139.026156778373}; for (uint32_t i = 0; i < NEQ; i++) *((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i]; } } // Solving the model for each cell in the tissue 
matrix ni x nj __global__ void solve_gpu(real dt, real *sv, real* stim_currents, uint32_t *cells_to_solve, uint32_t num_cells_to_solve, int num_steps) { int threadID = blockDim.x * blockIdx.x + threadIdx.x; int sv_id; // Each thread solves one cell model if(threadID < num_cells_to_solve) { if(cells_to_solve) sv_id = cells_to_solve[threadID]; else sv_id = threadID; real rDY[NEQ]; for (int n = 0; n < num_steps; ++n) { RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt); *((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id); for(int i = 0; i < NEQ; i++) { *((real*)((char*)sv + pitch * i) + sv_id) = rDY[i]; } } } } inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) { // State variables real svolt = *((real*)((char*)sv + pitch * 0) + threadID_); real sm = *((real*)((char*)sv + pitch * 1) + threadID_); real sh = *((real*)((char*)sv + pitch * 2) + threadID_); real sj = *((real*)((char*)sv + pitch * 3) + threadID_); real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_); real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_); real sxs = *((real*)((char*)sv + pitch * 6) + threadID_); real ss = *((real*)((char*)sv + pitch * 7) + threadID_); real sr = *((real*)((char*)sv + pitch * 8) + threadID_); real sd = *((real*)((char*)sv + pitch * 9) + threadID_); real sf = *((real*)((char*)sv + pitch * 10) + threadID_); real sfca = *((real*)((char*)sv + pitch * 11) + threadID_); real sg = *((real*)((char*)sv + pitch * 12) + threadID_); real Cai = *((real*)((char*)sv + pitch * 13) + threadID_); real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_); real Nai = *((real*)((char*)sv + pitch * 15) + threadID_); real Ki = *((real*)((char*)sv + pitch * 16) + threadID_); //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; ///#ifdef EPI real Gks=0.245; ///#endif ///#ifdef ENDO /// real Gks=0.245; ///#endif ///#ifdef MCELL //real Gks=0.062; ///#endif //Parameters for Ik1 real GK1=5.405; //Parameters for Ito ///#ifdef EPI real Gto=0.294; ///#endif ///#ifdef ENDO /// real Gto=0.073; ///#endif ///#ifdef MCELL /// real Gto=0.294; ///#endif //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; // Setting Elnaz's parameters real parameters []={14.4104027737061,0.000233793217484143,0.000134103897714886,0.000392835183485183,0.247705661554596,0.185613722097597,0.139724865300249,4.46736215747095,0.0148738820812480,1.45357322782103,1098.78347998281,0.000333024201998587,0.167028470911446,0.0196544238733398,0.00282952210522065,1.25600394920021e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; 
GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; /// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; ///Ileak=0.00008f*(CaSR-Cai); Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; 
Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; #ifdef EPI R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif #ifdef ENDO R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+28)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.; #endif #ifdef MCELL R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; #endif D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
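The gating variables at the end of this cell model are advanced with the exponential (Rush-Larsen) update y_new = y_inf - (y_inf - y)*exp(-dt/TAU) rather than forward Euler, which stays stable for the stiff gate equations at larger time steps. A minimal sketch of that update rule as a standalone helper (the name gate_update is hypothetical, not taken from the file above):

#include <math.h>

// Exact solution of dy/dt = (y_inf - y)/tau over one step dt,
// assuming y_inf and tau are held constant during the step.
__host__ __device__ inline float gate_update(float y, float y_inf, float tau, float dt) {
    return y_inf - (y_inf - y) * expf(-dt / tau);
}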
b11a8fdd3f97989a1eaa909ffe230e5f054f6b9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ===================================================================================== * * Filename: matrixtest.cu * * Description: test matrix.h and mystdlib.h * * Version: 1.0 * Created: 12/26/2013 05:51:39 PM * Revision: none * Compiler: gcc * * Author: Hoang-Ngan Nguyen (), zhoangngan-gmail * Organization: * * ===================================================================================== */ #include "matrix.h" #include <cstdlib> /* srand, rand */ #include <ctime> /* time */ #include <cstring> #include <fstream> #include <iostream> //#include <eigen3/Eigen/Core> #include <sys/times.h> using namespace std; //using namespace Eigen; // === constructTest: CUDA KERNEL ========================================{{{ // Name: constructTest // Description: // ============================================================================= __global__ void constructTest ( /* argument list: {{{*/ MatrixOnDevice A, MatrixOnDevice B, MatrixOnDevice C ) /* ------------- end of argument list ------------------------------}}}*/ { /* constructTest implementation: {{{*/ uint tid = threadIdx.x; A(tid) = B(tid) + C(tid); printf("tid = %d\n", tid); } /*}}}*/ /* ---------------- end of CUDA kernel constructTest ----------------- }}}*/ int main( int argc, char *argv[] ) { cout << "construcTest" << endl; MatrixOnHost A(3, 1, 20), B(3); A.print("A is"); MatrixOnDevice dB(3), dC(3, 1, 1); B(1) = 1; B(2) = 2; dB = B; B.print("B is"); cout << "before constructTest " << endl; hipLaunchKernelGGL(( constructTest) , dim3(1), dim3(3) , 0, 0, dB, dB, dC); cout << "after constructTest " << endl; B = dB; B.print("B is"); A.print("A is"); MatrixOnHost C = B; C.print("C is"); cout << "address of C is " << (long)&C(0) << " and B is " << (long)&B(0) << endl; cout << "End of program!!!!!!!!!!!!!!" << endl; return EXIT_SUCCESS; } // ---------- end of function main ----------
b11a8fdd3f97989a1eaa909ffe230e5f054f6b9e.cu
/* * ===================================================================================== * * Filename: matrixtest.cu * * Description: test matrix.h and mystdlib.h * * Version: 1.0 * Created: 12/26/2013 05:51:39 PM * Revision: none * Compiler: gcc * * Author: Hoang-Ngan Nguyen (), zhoangngan-gmail * Organization: * * ===================================================================================== */ #include "matrix.h" #include <cstdlib> /* srand, rand */ #include <ctime> /* time */ #include <cstring> #include <fstream> #include <iostream> //#include <eigen3/Eigen/Core> #include <sys/times.h> using namespace std; //using namespace Eigen; // === constructTest: CUDA KERNEL ========================================{{{ // Name: constructTest // Description: // ============================================================================= __global__ void constructTest ( /* argument list: {{{*/ MatrixOnDevice A, MatrixOnDevice B, MatrixOnDevice C ) /* ------------- end of argument list ------------------------------}}}*/ { /* constructTest implementation: {{{*/ uint tid = threadIdx.x; A(tid) = B(tid) + C(tid); printf("tid = %d\n", tid); } /*}}}*/ /* ---------------- end of CUDA kernel constructTest ----------------- }}}*/ int main( int argc, char *argv[] ) { cout << "construcTest" << endl; MatrixOnHost A(3, 1, 20), B(3); A.print("A is"); MatrixOnDevice dB(3), dC(3, 1, 1); B(1) = 1; B(2) = 2; dB = B; B.print("B is"); cout << "before constructTest " << endl; constructTest <<< 1, 3 >>> (dB, dB, dC); cout << "after constructTest " << endl; B = dB; B.print("B is"); A.print("A is"); MatrixOnHost C = B; C.print("C is"); cout << "address of C is " << (long)&C(0) << " and B is " << (long)&B(0) << endl; cout << "End of program!!!!!!!!!!!!!!" << endl; return EXIT_SUCCESS; } // ---------- end of function main ----------
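The main difference between this pair is the kernel launch syntax: hipify rewrites the triple-chevron launch constructTest <<< 1, 3 >>> (dB, dB, dC) as hipLaunchKernelGGL((constructTest), dim3(1), dim3(3), 0, 0, dB, dB, dC), where the two zeros are the dynamic shared-memory size and the stream that the chevron form left implicit. A minimal sketch of the same mapping with a hypothetical kernel:

__global__ void scale(float *x, float s) { x[threadIdx.x] *= s; }

// CUDA chevron launch (grid, block, shared-memory bytes, stream):
//   scale<<<dim3(1), dim3(64), 0, 0>>>(d_x, 2.0f);
// HIP launch emitted by hipify, with the same five launch parameters made explicit:
//   hipLaunchKernelGGL(scale, dim3(1), dim3(64), 0, 0, d_x, 2.0f);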
dadadc9a923991c0dd104456cfc34bdbff82548c.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_unary_op_basic.cu * \brief GPU Implementation of unary functions. */ #include "./elemwise_binary_op.h" #include "./elemwise_unary_op.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(relu) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>); NNVM_REGISTER_OP(_backward_relu) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::relu_grad>>); NNVM_REGISTER_OP(sigmoid) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>); NNVM_REGISTER_OP(_backward_sigmoid) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sigmoid_grad>>); NNVM_REGISTER_OP(hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>); NNVM_REGISTER_OP(_backward_hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>); // softsign NNVM_REGISTER_OP(softsign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>); NNVM_REGISTER_OP(_backward_softsign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::softsign_grad>>); // erf NNVM_REGISTER_OP(erf) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>); NNVM_REGISTER_OP(_backward_erf) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>); // erfinv NNVM_REGISTER_OP(erfinv) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>); NNVM_REGISTER_OP(_backward_erfinv) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>); // copy NNVM_REGISTER_OP(_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_reshape) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(BlockGrad) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(make_loss) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); // identity output as first input, but attributes are constrainted to be like rhs NNVM_REGISTER_OP(_identity_with_attr_like_rhs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", 
UnaryOp::IdentityComputeFirstItemEx<gpu>); NNVM_REGISTER_OP(reshape_like) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); void ShapeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); hipMemcpyAsync(out_data.dptr_, in_data.shape_.data(), in_data.ndim() * sizeof(int64_t), hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(shape_array) .set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU); void SizeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); const index_t size_var = in_data.Size(); hipMemcpyAsync(out_data.dptr_, &size_var, 1U * sizeof(int64_t), hipMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(size_array) .set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU); NNVM_REGISTER_OP(Cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); NNVM_REGISTER_OP(_backward_cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); // negative NNVM_REGISTER_OP(negative) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>); // abs NNVM_REGISTER_OP(abs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>); NNVM_REGISTER_OP(_backward_abs) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >); // sign NNVM_REGISTER_OP(sign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>); NNVM_REGISTER_OP(_backward_sign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sign_grad> >); // round NNVM_REGISTER_OP(round) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>); // ceil NNVM_REGISTER_OP(ceil) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>); // floor NNVM_REGISTER_OP(floor) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>); // trunc NNVM_REGISTER_OP(trunc) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>); // rint NNVM_REGISTER_OP(rint) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>); // fix NNVM_REGISTER_OP(fix) .set_attr<FCompute>("FCompute<gpu>", 
UnaryOp::Compute<gpu, mshadow_op::fix>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>); // gamma NNVM_REGISTER_OP(gamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>); NNVM_REGISTER_OP(_backward_gamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gamma_grad> >); // gammaln NNVM_REGISTER_OP(gammaln) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>); NNVM_REGISTER_OP(_backward_gammaln) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gammaln_grad> >); // digamma NNVM_REGISTER_OP(digamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::digamma>); NNVM_REGISTER_OP(_backward_digamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::trigamma> >); // logical not NNVM_REGISTER_OP(logical_not) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>); } // namespace op } // namespace mxnet
dadadc9a923991c0dd104456cfc34bdbff82548c.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_unary_op_basic.cu * \brief GPU Implementation of unary functions. */ #include "./elemwise_binary_op.h" #include "./elemwise_unary_op.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(relu) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::relu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::relu>); NNVM_REGISTER_OP(_backward_relu) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::relu_grad>>); NNVM_REGISTER_OP(sigmoid) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sigmoid>); NNVM_REGISTER_OP(_backward_sigmoid) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sigmoid_grad>>); NNVM_REGISTER_OP(hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidForward<gpu>); NNVM_REGISTER_OP(_backward_hard_sigmoid) .set_attr<FCompute>("FCompute<gpu>", HardSigmoidBackward<gpu>); // softsign NNVM_REGISTER_OP(softsign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::softsign>); NNVM_REGISTER_OP(_backward_softsign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::softsign_grad>>); // erf NNVM_REGISTER_OP(erf) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erf>); NNVM_REGISTER_OP(_backward_erf) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erf_grad>>); // erfinv NNVM_REGISTER_OP(erfinv) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::erfinv>); NNVM_REGISTER_OP(_backward_erfinv) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::erfinv_grad>>); // copy NNVM_REGISTER_OP(_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_copy) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); NNVM_REGISTER_OP(_backward_reshape) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(BlockGrad) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); NNVM_REGISTER_OP(make_loss) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeEx<gpu>); // identity output as first input, but attributes are constrainted to be like rhs NNVM_REGISTER_OP(_identity_with_attr_like_rhs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::IdentityComputeFirstItemEx<gpu>); NNVM_REGISTER_OP(reshape_like) 
.set_attr<FCompute>("FCompute<gpu>", UnaryOp::IdentityCompute<gpu>); void ShapeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); cudaMemcpyAsync(out_data.dptr_, in_data.shape_.data(), in_data.ndim() * sizeof(int64_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(shape_array) .set_attr<FCompute>("FCompute<gpu>", ShapeComputeGPU); void SizeComputeGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mshadow; using namespace mxnet_op; CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); CHECK_EQ(req.size(), 1U); const TBlob& in_data = inputs[0]; const TBlob& out_data = outputs[0]; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); const index_t size_var = in_data.Size(); cudaMemcpyAsync(out_data.dptr_, &size_var, 1U * sizeof(int64_t), cudaMemcpyHostToDevice, mshadow::Stream<gpu>::GetStream(s)); } NNVM_REGISTER_OP(size_array) .set_attr<FCompute>("FCompute<gpu>", SizeComputeGPU); NNVM_REGISTER_OP(Cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); NNVM_REGISTER_OP(_backward_cast) .set_attr<FCompute>("FCompute<gpu>", CastCompute<gpu>); // negative NNVM_REGISTER_OP(negative) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::negation>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::negation>); // abs NNVM_REGISTER_OP(abs) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::abs>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::abs>); NNVM_REGISTER_OP(_backward_abs) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute<gpu, unary_bwd<mshadow_op::sign> >); // sign NNVM_REGISTER_OP(sign) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::sign>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::sign>); NNVM_REGISTER_OP(_backward_sign) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::sign_grad> >); // round NNVM_REGISTER_OP(round) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::round>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::round>); // ceil NNVM_REGISTER_OP(ceil) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::ceil>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::ceil>); // floor NNVM_REGISTER_OP(floor) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::floor>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::floor>); // trunc NNVM_REGISTER_OP(trunc) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::trunc>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::trunc>); // rint NNVM_REGISTER_OP(rint) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::rint>) .set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::rint>); // fix NNVM_REGISTER_OP(fix) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::fix>) 
.set_attr<FComputeEx>("FComputeEx<gpu>", UnaryOp::ComputeEx<gpu, mshadow_op::fix>); // gamma NNVM_REGISTER_OP(gamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gamma>); NNVM_REGISTER_OP(_backward_gamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gamma_grad> >); // gammaln NNVM_REGISTER_OP(gammaln) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::gammaln>); NNVM_REGISTER_OP(_backward_gammaln) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::gammaln_grad> >); // digamma NNVM_REGISTER_OP(digamma) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::digamma>); NNVM_REGISTER_OP(_backward_digamma) .set_attr<FCompute>("FCompute<gpu>", ElemwiseBinaryOp::Compute< gpu, unary_bwd<mshadow_op::trigamma> >); // logical not NNVM_REGISTER_OP(logical_not) .set_attr<FCompute>("FCompute<gpu>", UnaryOp::Compute<gpu, mshadow_op::nt>); } // namespace op } // namespace mxnet
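In ShapeComputeGPU and SizeComputeGPU the port is a pure rename: cudaMemcpyAsync becomes hipMemcpyAsync and cudaMemcpyHostToDevice becomes hipMemcpyHostToDevice, with identical argument lists. Note that SizeComputeGPU copies from a stack variable asynchronously; the CUDA runtime documents that pageable source buffers are staged before the call returns, so this works, but a conservative sketch synchronizes the stream while the variable is still in scope (push_scalar is a hypothetical helper, not part of the file):

#include <cuda_runtime.h>
#include <cstdint>

// Asynchronously push one scalar to the device, then wait so the stack
// variable is guaranteed valid for the whole copy. hipify maps these calls
// 1:1 to hipMemcpyAsync / hipStreamSynchronize.
void push_scalar(int64_t value, int64_t *d_dst, cudaStream_t stream) {
    cudaMemcpyAsync(d_dst, &value, sizeof(int64_t), cudaMemcpyHostToDevice, stream);
    cudaStreamSynchronize(stream);
}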
41a515770d87da23a45e5389f270331c0e1cfdf0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** * This is a program of password cracking of two alphabets and four digits using cuda. * AV72, FT27, IR75, SC55 these passwords were provided and adding some digits to make four digits. * To Compile: nvcc -o CudaCrack CudaCrack.cu To run: ./CudaCrack *****************************************************************************/ __device__ char password_text[4][7]={"AV7212","ES2112","GT5912","RB9612"}; __device__ void displayResult(char *password) { printf("Password found is: %s\n",password); } __device__ void is_a_match(char *perform) { char *a = perform; char *b = perform; char *c = perform; char *d = perform; char *ps_1 = password_text[0]; char *ps_2 = password_text[1]; char *ps_3 = password_text[2]; char *ps_4 = password_text[3]; while(*a == *ps_1) { if(*a == '\0') { displayResult(perform); break; } a++; ps_1++; } while(*b == *ps_2) { if(*b == '\0') { displayResult(perform); break; } b++; ps_2++; } while(*c == *ps_3) { if(*c == '\0') { displayResult(perform); break; } c++; ps_3++; } while(*d == *ps_4) { if(*d == '\0') { displayResult(perform); break; } d++; ps_4++; } return; } __global__ void kernel() { char w,x,y,z; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char first_val = i; char second_val = j; password[0] = first_val; password[1] = second_val; for(w='0'; w<='9'; w++){ for(x='0'; x<='9'; x++){ for(y='0'; y<='9'; y++){ for(z='0'; z<='9'; z++){ password[2] = w; password[3] = x; password[4] = y; password[5] = z; is_a_match(password); } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); dim3 block_Dim(26,1,1), thread_Dim(26,1,1); hipLaunchKernelGGL(( kernel) , dim3(block_Dim),dim3(thread_Dim), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
41a515770d87da23a45e5389f270331c0e1cfdf0.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** * This is a program of password cracking of two alphabets and four digits using cuda. * AV72, FT27, IR75, SC55 these passwords were provided and adding some digits to make four digits. * To Compile: nvcc -o CudaCrack CudaCrack.cu To run: ./CudaCrack *****************************************************************************/ __device__ char password_text[4][7]={"AV7212","ES2112","GT5912","RB9612"}; __device__ void displayResult(char *password) { printf("Password found is: %s\n",password); } __device__ void is_a_match(char *perform) { char *a = perform; char *b = perform; char *c = perform; char *d = perform; char *ps_1 = password_text[0]; char *ps_2 = password_text[1]; char *ps_3 = password_text[2]; char *ps_4 = password_text[3]; while(*a == *ps_1) { if(*a == '\0') { displayResult(perform); break; } a++; ps_1++; } while(*b == *ps_2) { if(*b == '\0') { displayResult(perform); break; } b++; ps_2++; } while(*c == *ps_3) { if(*c == '\0') { displayResult(perform); break; } c++; ps_3++; } while(*d == *ps_4) { if(*d == '\0') { displayResult(perform); break; } d++; ps_4++; } return; } __global__ void kernel() { char w,x,y,z; char password[7]; password[6] = '\0'; int i = blockIdx.x+65; int j = threadIdx.x+65; char first_val = i; char second_val = j; password[0] = first_val; password[1] = second_val; for(w='0'; w<='9'; w++){ for(x='0'; x<='9'; x++){ for(y='0'; y<='9'; y++){ for(z='0'; z<='9'; z++){ password[2] = w; password[3] = x; password[4] = y; password[5] = z; is_a_match(password); } } } } } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); dim3 block_Dim(26,1,1), thread_Dim(26,1,1); kernel <<<block_Dim,thread_Dim>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
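The CUDA side of this pair still calls the long-deprecated cudaThreadSynchronize(); hipify maps it to hipDeviceSynchronize(), which matches the modern CUDA replacement cudaDeviceSynchronize(). A minimal sketch of the same wall-clock timing pattern around an asynchronous launch (the work kernel is hypothetical):

#include <time.h>
#include <cuda_runtime.h>

__global__ void work() { /* hypothetical kernel body */ }

int main() {
    struct timespec t0, t1;
    clock_gettime(CLOCK_MONOTONIC, &t0);
    work<<<dim3(26), dim3(26)>>>();   // kernel launches return immediately
    cudaDeviceSynchronize();          // modern replacement for cudaThreadSynchronize()
    clock_gettime(CLOCK_MONOTONIC, &t1);
    return 0;
}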
27e1b5f2157bc7cfe9bf91211a75ea711db1dbb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_exp (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(exp)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
27e1b5f2157bc7cfe9bf91211a75ea711db1dbb5.cu
#include "includes.h" extern "C" { #ifndef REAL #define REAL float #endif #ifndef CAST #define CAST(fun) fun ## f #endif #ifndef REAL2o3 #define REAL2o3 (REAL)0.6666666666666667 #endif #ifndef REAL3o2 #define REAL3o2 (REAL)1.5 #endif } __global__ void uplo_exp (const int sd, const int unit, const int bottom, const REAL* a, const int offset_a, const int ld_a, REAL* b, const int offset_b, const int ld_b) { const int gid_0 = blockIdx.x * blockDim.x + threadIdx.x; const int gid_1 = blockIdx.y * blockDim.y + threadIdx.y; const bool valid = (gid_0 < sd) && (gid_1 < sd); const bool check = valid && ((unit == 132) ? bottom * gid_0 > bottom * gid_1 : bottom * gid_0 >= bottom * gid_1); if (check) { b[offset_b + gid_0 + gid_1 * ld_b] = CAST(exp)(a[offset_a + gid_0 + gid_1 * ld_a]); } }
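The CAST macro here relies on token pasting: with CAST(fun) defined as fun ## f, CAST(exp) expands to expf, so the single-precision build of the kernel calls the float variant of each math function. A minimal sketch of the same trick (demo is a hypothetical function; a double-precision build would presumably redefine CAST(fun) as plain fun):

#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f   // CAST(exp) -> expf, CAST(sqrt) -> sqrtf
#endif

__device__ REAL demo(REAL x) {
    return CAST(exp)(-x) * CAST(sqrt)(x);   // expf(-x) * sqrtf(x) when REAL is float
}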
add.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define N 10 __global__ void add(int *a, int *b, int *c) { int tid = blockIdx.x; if (tid < N) c[tid] = a[tid] + b[tid]; }
add.cuh
#ifndef ADD_H #define ADD_H #include "vector.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> /** Componentwise addition of two vectors a and b, storing the result in a vector c. Vectors must all be of the same length. Returns: 0 on success, non-zero otherwise */ int add_dVectors(dVector a, dVector b, dVector c); void test_add(int n); #endif
add.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define N 10 __global__ void add(int *a, int *b, int *c) { int tid = blockIdx.x; if (tid < N) c[tid] = a[tid] + b[tid]; }
add.cu
#include "includes.h" #define N 10 __global__ void add(int *a, int *b, int *c) { int tid = blockIdx.x; if (tid < N) c[tid] = a[tid] + b[tid]; }
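The add kernel in this pair indexes by blockIdx.x only, so a matching host driver launches N blocks of one thread each. A minimal hypothetical driver, not part of either file, assuming the add kernel and #define N 10 from add.cu above:

int main() {
    int a[N], b[N], c[N], *d_a, *d_b, *d_c;
    for (int i = 0; i < N; ++i) { a[i] = i; b[i] = i * i; }
    cudaMalloc(&d_a, N * sizeof(int));
    cudaMalloc(&d_b, N * sizeof(int));
    cudaMalloc(&d_c, N * sizeof(int));
    cudaMemcpy(d_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    add<<<N, 1>>>(d_a, d_b, d_c);   // one block per element, matching tid = blockIdx.x
    cudaMemcpy(c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}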
e166dc05ffa7fbb1da9f20ba4ca052fa941cd04b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/dictionary/update_keys.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/stream_compaction.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/search.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace dictionary { namespace detail { /** * @brief Create a new dictionary column by adding the new keys elements * to the existing dictionary_column. * * ``` * Example: * d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}} * d2 = add_keys( d1, [d, b, e] ) * d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]} * ``` * */ std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS( !new_keys.has_nulls(), "Keys must not have nulls" ); auto old_keys = dictionary_column.keys(); // [a,b,c,d,f] CUDF_EXPECTS( new_keys.type()==old_keys.type(), "Keys must be the same type"); // first, concatenate the keys together // [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e] auto combined_keys = cudf::detail::concatenate(std::vector<column_view>{old_keys, new_keys}, mr, stream); // sort and remove any duplicates from the combined keys // drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f] auto table_keys = experimental::detail::drop_duplicates( table_view{{*combined_keys}}, std::vector<size_type>{0}, // only one key column experimental::duplicate_keep_option::KEEP_FIRST, true, mr, stream )->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // create a map for the indices // lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5] auto map_indices = cudf::experimental::detail::lower_bound( table_view{{keys_column->view()}}, table_view{{old_keys}}, std::vector<order>{order::ASCENDING}, std::vector<null_order>{null_order::AFTER}, // should be no nulls here mr, stream); // now create the indices column -- map old values to the new ones // gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0] column_view indices_view( data_type{INT32}, dictionary_column.size(), dictionary_column.indices().data<int32_t>(), nullptr, 0, dictionary_column.offset() ); auto table_indices = cudf::experimental::detail::gather( table_view{{map_indices->view()}}, indices_view, false, true, false, // ignore out-of-bounds mr, stream )->release(); // the result may contain nulls if the input contains nulls and the corresponding index is therefore invalid auto contents = table_indices.front()->release(); auto indices_column = std::make_unique<column>( data_type{INT32}, dictionary_column.size(), std::move(*(contents.data.release())), rmm::device_buffer{}, 0 ); // create new dictionary column with keys_column and 
indices_column return make_dictionary_column( std::move(keys_column), std::move(indices_column), copy_bitmask( dictionary_column.parent(), stream, mr), // nulls have dictionary_column.null_count() ); // not changed } } // namespace detail std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { return detail::add_keys(dictionary_column, keys,mr); } } // namespace dictionary } // namespace cudf
e166dc05ffa7fbb1da9f20ba4ca052fa941cd04b.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/dictionary/update_keys.hpp> #include <cudf/dictionary/dictionary_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/stream_compaction.hpp> #include <cudf/detail/concatenate.cuh> #include <cudf/detail/stream_compaction.hpp> #include <cudf/detail/gather.hpp> #include <cudf/detail/search.hpp> #include <rmm/thrust_rmm_allocator.h> namespace cudf { namespace dictionary { namespace detail { /** * @brief Create a new dictionary column by adding the new keys elements * to the existing dictionary_column. * * ``` * Example: * d1 = {[a, b, c, d, f], {4, 0, 3, 1, 2, 2, 2, 4, 0}} * d2 = add_keys( d1, [d, b, e] ) * d2 is now {[a, b, c, d, e, f], [5, 0, 3, 1, 2, 2, 2, 5, 0]} * ``` * */ std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& new_keys, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS( !new_keys.has_nulls(), "Keys must not have nulls" ); auto old_keys = dictionary_column.keys(); // [a,b,c,d,f] CUDF_EXPECTS( new_keys.type()==old_keys.type(), "Keys must be the same type"); // first, concatenate the keys together // [a,b,c,d,f] + [d,b,e] = [a,b,c,d,f,d,b,e] auto combined_keys = cudf::detail::concatenate(std::vector<column_view>{old_keys, new_keys}, mr, stream); // sort and remove any duplicates from the combined keys // drop_duplicates([a,b,c,d,f,d,b,e]) = [a,b,c,d,e,f] auto table_keys = experimental::detail::drop_duplicates( table_view{{*combined_keys}}, std::vector<size_type>{0}, // only one key column experimental::duplicate_keep_option::KEEP_FIRST, true, mr, stream )->release(); std::unique_ptr<column> keys_column(std::move(table_keys.front())); // create a map for the indices // lower_bound([a,b,c,d,e,f],[a,b,c,d,f]) = [0,1,2,3,5] auto map_indices = cudf::experimental::detail::lower_bound( table_view{{keys_column->view()}}, table_view{{old_keys}}, std::vector<order>{order::ASCENDING}, std::vector<null_order>{null_order::AFTER}, // should be no nulls here mr, stream); // now create the indices column -- map old values to the new ones // gather([4,0,3,1,2,2,2,4,0],[0,1,2,3,5]) = [5,0,3,1,2,2,2,5,0] column_view indices_view( data_type{INT32}, dictionary_column.size(), dictionary_column.indices().data<int32_t>(), nullptr, 0, dictionary_column.offset() ); auto table_indices = cudf::experimental::detail::gather( table_view{{map_indices->view()}}, indices_view, false, true, false, // ignore out-of-bounds mr, stream )->release(); // the result may contain nulls if the input contains nulls and the corresponding index is therefore invalid auto contents = table_indices.front()->release(); auto indices_column = std::make_unique<column>( data_type{INT32}, dictionary_column.size(), std::move(*(contents.data.release())), rmm::device_buffer{}, 0 ); // create new dictionary column with keys_column and indices_column return make_dictionary_column( 
std::move(keys_column), std::move(indices_column), copy_bitmask( dictionary_column.parent(), stream, mr), // nulls have dictionary_column.null_count() ); // not changed } } // namespace detail std::unique_ptr<column> add_keys( dictionary_column_view const& dictionary_column, column_view const& keys, rmm::mr::device_memory_resource* mr) { return detail::add_keys(dictionary_column, keys,mr); } } // namespace dictionary } // namespace cudf
c8b3dfd8cae701fbba73db42673efc38bd5379ef.hip
// !!! This is a file automatically generated by hipify!!! //general parts #include <stdio.h> #include <vector> #include <memory> #include <string.h> #include <chrono> #include <thread> #include <iostream> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> //CUDA parts #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hipfft.h> #define GROUP 1 void sample_3_benchmark_cuFFT_single_3d(bool file_output, FILE* output, int device_id) { if (file_output) fprintf(output, "3 - cuFFT FFT + iFFT C2C multidimensional benchmark in single precision\n"); printf("3 - cuFFT FFT + iFFT C2C multidimensional benchmark in single precision\n"); hipSetDevice(device_id); const int num_benchmark_samples = 39; const int num_runs = 3; uint64_t benchmark_dimensions[num_benchmark_samples][4] = { {1024, 1024, 1, 2}, {720, 480, 1, 2},{1280, 720, 1, 2},{1920, 1080, 1, 2}, {2560, 1440, 1, 2},{3840, 2160, 1, 2},{7680, 4320, 1, 2}, {(uint64_t)pow(2,6), (uint64_t)pow(2,6), 1, 2}, {(uint64_t)pow(2,7), (uint64_t)pow(2,6), 1, 2}, {(uint64_t)pow(2,7), (uint64_t)pow(2,7), 1, 2}, {(uint64_t)pow(2,8), (uint64_t)pow(2,7), 1, 2},{(uint64_t)pow(2,8), (uint64_t)pow(2,8), 1, 2}, {(uint64_t)pow(2,9), (uint64_t)pow(2,8), 1, 2},{(uint64_t)pow(2,9), (uint64_t)pow(2,9), 1, 2}, {(uint64_t)pow(2,10), (uint64_t)pow(2,9), 1, 2},{(uint64_t)pow(2,10), (uint64_t)pow(2,10), 1, 2}, {(uint64_t)pow(2,11), (uint64_t)pow(2,10), 1, 2},{(uint64_t)pow(2,11), (uint64_t)pow(2,11), 1, 2}, {(uint64_t)pow(2,12), (uint64_t)pow(2,11), 1, 2},{(uint64_t)pow(2,12), (uint64_t)pow(2,12), 1, 2}, {(uint64_t)pow(2,13), (uint64_t)pow(2,12), 1, 2}, {(uint64_t)pow(2,13), (uint64_t)pow(2,13), 1, 2},{(uint64_t)pow(2,14), (uint64_t)pow(2,13), 1, 2}, {(uint64_t)pow(2,4), (uint64_t)pow(2,4), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,4), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,5), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,5), (uint64_t)pow(2,5), 3},{(uint64_t)pow(2,6), (uint64_t)pow(2,5), (uint64_t)pow(2,5), 3} ,{(uint64_t)pow(2,6), (uint64_t)pow(2,6), (uint64_t)pow(2,5), 3} , {(uint64_t)pow(2,6), (uint64_t)pow(2,6), (uint64_t)pow(2,6), 3},{(uint64_t)pow(2,7), (uint64_t)pow(2,6), (uint64_t)pow(2,6), 3} ,{(uint64_t)pow(2,7), (uint64_t)pow(2,7), (uint64_t)pow(2,6), 3} ,{(uint64_t)pow(2,7), (uint64_t)pow(2,7), (uint64_t)pow(2,7), 3},{(uint64_t)pow(2,8), (uint64_t)pow(2,7), (uint64_t)pow(2,7), 3} , {(uint64_t)pow(2,8), (uint64_t)pow(2,8), (uint64_t)pow(2,7), 3} , {(uint64_t)pow(2,8), (uint64_t)pow(2,8), (uint64_t)pow(2,8), 3},{(uint64_t)pow(2,9), (uint64_t)pow(2,8), (uint64_t)pow(2,8), 3}, {(uint64_t)pow(2,9), (uint64_t)pow(2,9), (uint64_t)pow(2,8), 3},{(uint64_t)pow(2,9), (uint64_t)pow(2,9), (uint64_t)pow(2,9), 3} }; double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples hipfftComplex* inputC = (hipfftComplex*)malloc((uint64_t)sizeof(hipfftComplex)*pow(2, 27)); for (uint64_t i = 0; i < pow(2, 27); i++) { inputC[i].x = 2 * ((float)rand()) / RAND_MAX - 1.0; inputC[i].y = 2 * ((float)rand()) / RAND_MAX - 1.0; } for (int n = 0; n < num_benchmark_samples; n++) { double run_time[num_runs][2]; for (int r = 0; r < num_runs; r++) { hipfftHandle planC2C; hipfftComplex* dataC; uint64_t dims[3] = { benchmark_dimensions[n][0] , benchmark_dimensions[n][1] ,benchmark_dimensions[n][2] }; hipMalloc((void**)&dataC, sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2]); hipMemcpy(dataC, inputC, sizeof(hipfftComplex) * dims[0] * 
dims[1] * dims[2], hipMemcpyHostToDevice); if (hipGetLastError() != hipSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return; } switch (benchmark_dimensions[n][3]) { case 1: hipfftPlan1d(&planC2C, dims[0], HIPFFT_C2C, 1); break; case 2: hipfftPlan2d(&planC2C, dims[1], dims[0], HIPFFT_C2C); break; case 3: hipfftPlan3d(&planC2C, dims[2], dims[1], dims[0], HIPFFT_C2C); break; } float totTime = 0; uint64_t cuBufferSize = sizeof(float) * 2 * dims[0] * dims[1] * dims[2]; uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize; if (num_iter == 0) num_iter = 1; std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now(); for (int i = 0; i < num_iter; i++) { hipfftExecC2C(planC2C, dataC, dataC, -1); hipfftExecC2C(planC2C, dataC, dataC, 1); } hipDeviceSynchronize(); std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now(); totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter; run_time[r][0] = totTime; if (n > 0) { if (r == num_runs - 1) { double std_error = 0; double avg_time = 0; for (uint64_t t = 0; t < num_runs; t++) { avg_time += run_time[t][0]; } avg_time /= num_runs; for (uint64_t t = 0; t < num_runs; t++) { std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time); } std_error = sqrt(std_error / num_runs); if (file_output) fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time)); printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time)); benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time; } } hipfftDestroy(planC2C); hipFree(dataC); hipDeviceSynchronize(); //hipfftComplex* output_cuFFT = (hipfftComplex*)(malloc(sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2])); //hipMemcpy(output_cuFFT, dataC, sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2], hipMemcpyDeviceToHost); //hipDeviceSynchronize(); } } free(inputC); benchmark_result[0] /= (num_benchmark_samples - 1); if (file_output) fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); }
c8b3dfd8cae701fbba73db42673efc38bd5379ef.cu
//general parts #include <stdio.h> #include <vector> #include <memory> #include <string.h> #include <chrono> #include <thread> #include <iostream> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> //CUDA parts #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cufft.h> #define GROUP 1 void sample_3_benchmark_cuFFT_single_3d(bool file_output, FILE* output, int device_id) { if (file_output) fprintf(output, "3 - cuFFT FFT + iFFT C2C multidimensional benchmark in single precision\n"); printf("3 - cuFFT FFT + iFFT C2C multidimensional benchmark in single precision\n"); cudaSetDevice(device_id); const int num_benchmark_samples = 39; const int num_runs = 3; uint64_t benchmark_dimensions[num_benchmark_samples][4] = { {1024, 1024, 1, 2}, {720, 480, 1, 2},{1280, 720, 1, 2},{1920, 1080, 1, 2}, {2560, 1440, 1, 2},{3840, 2160, 1, 2},{7680, 4320, 1, 2}, {(uint64_t)pow(2,6), (uint64_t)pow(2,6), 1, 2}, {(uint64_t)pow(2,7), (uint64_t)pow(2,6), 1, 2}, {(uint64_t)pow(2,7), (uint64_t)pow(2,7), 1, 2}, {(uint64_t)pow(2,8), (uint64_t)pow(2,7), 1, 2},{(uint64_t)pow(2,8), (uint64_t)pow(2,8), 1, 2}, {(uint64_t)pow(2,9), (uint64_t)pow(2,8), 1, 2},{(uint64_t)pow(2,9), (uint64_t)pow(2,9), 1, 2}, {(uint64_t)pow(2,10), (uint64_t)pow(2,9), 1, 2},{(uint64_t)pow(2,10), (uint64_t)pow(2,10), 1, 2}, {(uint64_t)pow(2,11), (uint64_t)pow(2,10), 1, 2},{(uint64_t)pow(2,11), (uint64_t)pow(2,11), 1, 2}, {(uint64_t)pow(2,12), (uint64_t)pow(2,11), 1, 2},{(uint64_t)pow(2,12), (uint64_t)pow(2,12), 1, 2}, {(uint64_t)pow(2,13), (uint64_t)pow(2,12), 1, 2}, {(uint64_t)pow(2,13), (uint64_t)pow(2,13), 1, 2},{(uint64_t)pow(2,14), (uint64_t)pow(2,13), 1, 2}, {(uint64_t)pow(2,4), (uint64_t)pow(2,4), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,4), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,5), (uint64_t)pow(2,4), 3} ,{(uint64_t)pow(2,5), (uint64_t)pow(2,5), (uint64_t)pow(2,5), 3},{(uint64_t)pow(2,6), (uint64_t)pow(2,5), (uint64_t)pow(2,5), 3} ,{(uint64_t)pow(2,6), (uint64_t)pow(2,6), (uint64_t)pow(2,5), 3} , {(uint64_t)pow(2,6), (uint64_t)pow(2,6), (uint64_t)pow(2,6), 3},{(uint64_t)pow(2,7), (uint64_t)pow(2,6), (uint64_t)pow(2,6), 3} ,{(uint64_t)pow(2,7), (uint64_t)pow(2,7), (uint64_t)pow(2,6), 3} ,{(uint64_t)pow(2,7), (uint64_t)pow(2,7), (uint64_t)pow(2,7), 3},{(uint64_t)pow(2,8), (uint64_t)pow(2,7), (uint64_t)pow(2,7), 3} , {(uint64_t)pow(2,8), (uint64_t)pow(2,8), (uint64_t)pow(2,7), 3} , {(uint64_t)pow(2,8), (uint64_t)pow(2,8), (uint64_t)pow(2,8), 3},{(uint64_t)pow(2,9), (uint64_t)pow(2,8), (uint64_t)pow(2,8), 3}, {(uint64_t)pow(2,9), (uint64_t)pow(2,9), (uint64_t)pow(2,8), 3},{(uint64_t)pow(2,9), (uint64_t)pow(2,9), (uint64_t)pow(2,9), 3} }; double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples cufftComplex* inputC = (cufftComplex*)malloc((uint64_t)sizeof(cufftComplex)*pow(2, 27)); for (uint64_t i = 0; i < pow(2, 27); i++) { inputC[i].x = 2 * ((float)rand()) / RAND_MAX - 1.0; inputC[i].y = 2 * ((float)rand()) / RAND_MAX - 1.0; } for (int n = 0; n < num_benchmark_samples; n++) { double run_time[num_runs][2]; for (int r = 0; r < num_runs; r++) { cufftHandle planC2C; cufftComplex* dataC; uint64_t dims[3] = { benchmark_dimensions[n][0] , benchmark_dimensions[n][1] ,benchmark_dimensions[n][2] }; cudaMalloc((void**)&dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]); cudaMemcpy(dataC, inputC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice); if (cudaGetLastError() 
!= cudaSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return; } switch (benchmark_dimensions[n][3]) { case 1: cufftPlan1d(&planC2C, dims[0], CUFFT_C2C, 1); break; case 2: cufftPlan2d(&planC2C, dims[1], dims[0], CUFFT_C2C); break; case 3: cufftPlan3d(&planC2C, dims[2], dims[1], dims[0], CUFFT_C2C); break; } float totTime = 0; uint64_t cuBufferSize = sizeof(float) * 2 * dims[0] * dims[1] * dims[2]; uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize; if (num_iter == 0) num_iter = 1; std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now(); for (int i = 0; i < num_iter; i++) { cufftExecC2C(planC2C, dataC, dataC, -1); cufftExecC2C(planC2C, dataC, dataC, 1); } cudaDeviceSynchronize(); std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now(); totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter; run_time[r][0] = totTime; if (n > 0) { if (r == num_runs - 1) { double std_error = 0; double avg_time = 0; for (uint64_t t = 0; t < num_runs; t++) { avg_time += run_time[t][0]; } avg_time /= num_runs; for (uint64_t t = 0; t < num_runs; t++) { std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time); } std_error = sqrt(std_error / num_runs); if (file_output) fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time)); printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time)); benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time; } } cufftDestroy(planC2C); cudaFree(dataC); cudaDeviceSynchronize(); //cufftComplex* output_cuFFT = (cufftComplex*)(malloc(sizeof(cufftComplex) * dims[0] * dims[1] * dims[2])); //cudaMemcpy(output_cuFFT, dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost); //cudaDeviceSynchronize(); } } free(inputC); benchmark_result[0] /= (num_benchmark_samples - 1); if (file_output) fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0])); }
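Beyond the usual runtime renames, this pair shows the cuFFT-to-hipFFT mapping: cufftHandle and cufftComplex become hipfftHandle and hipfftComplex, cufftPlan2d/cufftPlan3d become hipfftPlan2d/hipfftPlan3d, CUFFT_C2C becomes HIPFFT_C2C, and cufftExecC2C becomes hipfftExecC2C (the benchmark passes the raw directions -1 and 1, which are the values of CUFFT_FORWARD and CUFFT_INVERSE). A minimal sketch of one forward-plus-inverse C2C pass on device data (roundtrip is a hypothetical helper):

#include <cufft.h>

// One forward + inverse C2C transform over an n-point signal already on the device.
void roundtrip(cufftComplex *d_data, int n) {
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);
    cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);   // same as passing -1 in the benchmark
    cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);   // same as passing  1
    cufftDestroy(plan);
}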
955f6aff89aaf1f14075ca5260cf4de55f096c98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <tdp/eigen/dense.h> #include <tdp/data/image.h> #include <tdp/data/managed_image.h> #include <tdp/cuda/cuda.h> #include <tdp/reductions/reductions.cuh> #include <tdp/stats/sufficientStats.h> #include <tdp/cuda/cuda.cuh> namespace tdp { template<typename T, size_t D, int BLK_SIZE> __global__ void KernelSufficientStats1stOrder( Image<Eigen::Matrix<T,D,1,Eigen::DontAlign>> I, Image<Eigen::Matrix<T,D+1,1,Eigen::DontAlign>> Isum, Image<uint16_t> z, uint16_t k, int N_PER_T ) { SharedMemory<Eigen::Matrix<T,D+1,1,Eigen::DontAlign>> smem; Eigen::Matrix<T,D+1,1,Eigen::DontAlign>* sum = smem.getPointer(); //const int tid = threadIdx.x; const int tid = threadIdx.x; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idS = idx*N_PER_T; const int N = I.Area(); const int idE = min(N,(idx+1)*N_PER_T); // caching //if (tid==0) printf("%d <? %d %d\n",idS,N,N_PER_T); sum[tid] = Eigen::Matrix<T,D+1,1,Eigen::DontAlign>::Zero(); for(int i=idS; i<idE; ++i) { if (!isNan(I[i]) && (z.ptr_ == nullptr || (z.ptr_ != nullptr && z[i]==k))) { Eigen::Matrix<T,D+1,1,Eigen::DontAlign> xi; xi.topRows(D) = I[i]; xi(D) = 1.; sum[tid] += xi; } } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLK_SIZE)/2; s>1; s>>=1) { if(tid < s) { sum[tid] += sum[tid+s]; } __syncthreads(); } if(tid < D+1) { // sum the last two remaining matrixes directly into global memory atomicAdd(&(Isum.ptr_[0](tid)), sum[0](tid)+sum[1](tid)); } } Vector4fda SufficientStats1stOrder(const Image<Vector3fda>& I) { size_t N = I.Area(); dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,256); ManagedDeviceImage<Vector4fda> Isum(1,1); hipMemset(Isum.ptr_, 0, sizeof(Vector4fda)); Image<uint16_t> z; hipLaunchKernelGGL(( KernelSufficientStats1stOrder<float,3,256>), dim3(blocks),dim3(threads), 256*sizeof(Eigen::Matrix<float,4,1,Eigen::DontAlign>), 0, I,Isum,z,0,10); checkCudaErrors(hipDeviceSynchronize()); Vector4fda sum = Vector4fda::Zero(); hipMemcpy(&sum,Isum.ptr_,sizeof(Vector4fda), hipMemcpyDeviceToHost); return sum; } Eigen::Matrix<float,4,Eigen::Dynamic, Eigen::DontAlign> SufficientStats1stOrder( const Image<Vector3fda>& I, const Image<uint16_t> z, uint16_t K) { size_t N = I.Area(); dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,256); ManagedDeviceImage<Vector4fda> Iss(K,1); hipMemset(Iss.ptr_, 0, K*sizeof(Vector4fda)); for (uint16_t k=0; k<K; ++k) { Image<Vector4fda> Issk(1,1,&Iss[k]); //std::cout << Issk.ptr_ << std::endl; hipLaunchKernelGGL(( KernelSufficientStats1stOrder<float,3,256>), dim3(blocks),dim3(threads), 256*sizeof(Eigen::Matrix<float,4,1,Eigen::DontAlign>), 0, I,Issk,z,k,10); } checkCudaErrors(hipDeviceSynchronize()); Eigen::Matrix<float,4,Eigen::Dynamic, Eigen::DontAlign> ss(4,K); hipMemcpy(ss.data(),Iss.ptr_, K*sizeof(Vector4fda), hipMemcpyDeviceToHost); return ss; } }
955f6aff89aaf1f14075ca5260cf4de55f096c98.cu
#include <tdp/eigen/dense.h> #include <tdp/data/image.h> #include <tdp/data/managed_image.h> #include <tdp/cuda/cuda.h> #include <tdp/reductions/reductions.cuh> #include <tdp/stats/sufficientStats.h> #include <tdp/cuda/cuda.cuh> namespace tdp { template<typename T, size_t D, int BLK_SIZE> __global__ void KernelSufficientStats1stOrder( Image<Eigen::Matrix<T,D,1,Eigen::DontAlign>> I, Image<Eigen::Matrix<T,D+1,1,Eigen::DontAlign>> Isum, Image<uint16_t> z, uint16_t k, int N_PER_T ) { SharedMemory<Eigen::Matrix<T,D+1,1,Eigen::DontAlign>> smem; Eigen::Matrix<T,D+1,1,Eigen::DontAlign>* sum = smem.getPointer(); //const int tid = threadIdx.x; const int tid = threadIdx.x; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int idS = idx*N_PER_T; const int N = I.Area(); const int idE = min(N,(idx+1)*N_PER_T); // caching //if (tid==0) printf("%d <? %d %d\n",idS,N,N_PER_T); sum[tid] = Eigen::Matrix<T,D+1,1,Eigen::DontAlign>::Zero(); for(int i=idS; i<idE; ++i) { if (!isNan(I[i]) && (z.ptr_ == nullptr || (z.ptr_ != nullptr && z[i]==k))) { Eigen::Matrix<T,D+1,1,Eigen::DontAlign> xi; xi.topRows(D) = I[i]; xi(D) = 1.; sum[tid] += xi; } } // old reduction..... __syncthreads(); //sync the threads #pragma unroll for(int s=(BLK_SIZE)/2; s>1; s>>=1) { if(tid < s) { sum[tid] += sum[tid+s]; } __syncthreads(); } if(tid < D+1) { // sum the last two remaining matrixes directly into global memory atomicAdd(&(Isum.ptr_[0](tid)), sum[0](tid)+sum[1](tid)); } } Vector4fda SufficientStats1stOrder(const Image<Vector3fda>& I) { size_t N = I.Area(); dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,256); ManagedDeviceImage<Vector4fda> Isum(1,1); cudaMemset(Isum.ptr_, 0, sizeof(Vector4fda)); Image<uint16_t> z; KernelSufficientStats1stOrder<float,3,256><<<blocks,threads, 256*sizeof(Eigen::Matrix<float,4,1,Eigen::DontAlign>)>>>(I,Isum,z,0,10); checkCudaErrors(cudaDeviceSynchronize()); Vector4fda sum = Vector4fda::Zero(); cudaMemcpy(&sum,Isum.ptr_,sizeof(Vector4fda), cudaMemcpyDeviceToHost); return sum; } Eigen::Matrix<float,4,Eigen::Dynamic, Eigen::DontAlign> SufficientStats1stOrder( const Image<Vector3fda>& I, const Image<uint16_t> z, uint16_t K) { size_t N = I.Area(); dim3 threads, blocks; ComputeKernelParamsForArray(blocks,threads,N/10,256); ManagedDeviceImage<Vector4fda> Iss(K,1); cudaMemset(Iss.ptr_, 0, K*sizeof(Vector4fda)); for (uint16_t k=0; k<K; ++k) { Image<Vector4fda> Issk(1,1,&Iss[k]); //std::cout << Issk.ptr_ << std::endl; KernelSufficientStats1stOrder<float,3,256><<<blocks,threads, 256*sizeof(Eigen::Matrix<float,4,1,Eigen::DontAlign>)>>>(I,Issk,z,k,10); } checkCudaErrors(cudaDeviceSynchronize()); Eigen::Matrix<float,4,Eigen::Dynamic, Eigen::DontAlign> ss(4,K); cudaMemcpy(ss.data(),Iss.ptr_, K*sizeof(Vector4fda), cudaMemcpyDeviceToHost); return ss; } }
a457fbf851a06c7c762c8ab898dc943789156f93.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "naive.h" #include <iostream> namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernIterateScan(int n, int d, int* srcBuffer, int* desBuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (n <= index) { return; } int p = 1 << d - 1; if (index < n) { desBuffer[index] = srcBuffer[index] + (p <= index ? srcBuffer[index - p] : 0); } } // This is totally amateurish, but hey, this *is* supposed to be the "naive" scan! __global__ void kernConvertInclusiveToExclusive(int n, int* srcBuffer, int* desBuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index && index < n) { desBuffer[index] = srcBuffer[index-1]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int paddedN = int(pow(2, ilog2ceil(n))); int blockSize = 128; int gridSize = ceil(paddedN * 1.0 / blockSize); int* device_flipflopA; int* device_flipflopB; int* device_srcBuffer; int* device_desBuffer; hipMalloc((void**)&device_flipflopA, paddedN * sizeof(int)); checkCUDAError("hipMalloc dev_A failed!"); hipMalloc((void**)&device_flipflopB, paddedN * sizeof(int)); checkCUDAError("hipMalloc dev_B failed!"); hipMemcpy(device_flipflopA, idata, n * sizeof(int), hipMemcpyHostToDevice); hipDeviceSynchronize(); timer().startGpuTimer(); for (int d = 1; d <= ilog2ceil(n); d++) { device_srcBuffer = d % 2 == 0 ? device_flipflopB : device_flipflopA; device_desBuffer = d % 2 == 0 ? device_flipflopA : device_flipflopB; kernIterateScan << <gridSize, blockSize >> > (n, d, device_srcBuffer, device_desBuffer); hipDeviceSynchronize(); checkCUDAError("Iteration error"); } kernConvertInclusiveToExclusive << <gridSize, blockSize >> > (n, device_desBuffer, device_srcBuffer); hipDeviceSynchronize(); timer().endGpuTimer(); hipMemcpy(odata, device_srcBuffer, n * sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); hipFree(device_flipflopA); hipFree(device_flipflopB); odata[0] = 0; } } }
a457fbf851a06c7c762c8ab898dc943789156f93.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "naive.h" #include <iostream> namespace StreamCompaction { namespace Naive { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernIterateScan(int n, int d, int* srcBuffer, int* desBuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (n <= index) { return; } int p = 1 << d - 1; if (index < n) { desBuffer[index] = srcBuffer[index] + (p <= index ? srcBuffer[index - p] : 0); } } // This is totally amateurish, but hey, this *is* supposed to be the "naive" scan! __global__ void kernConvertInclusiveToExclusive(int n, int* srcBuffer, int* desBuffer) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index && index < n) { desBuffer[index] = srcBuffer[index-1]; } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { int paddedN = int(pow(2, ilog2ceil(n))); int blockSize = 128; int gridSize = ceil(paddedN * 1.0 / blockSize); int* device_flipflopA; int* device_flipflopB; int* device_srcBuffer; int* device_desBuffer; cudaMalloc((void**)&device_flipflopA, paddedN * sizeof(int)); checkCUDAError("cudaMalloc dev_A failed!"); cudaMalloc((void**)&device_flipflopB, paddedN * sizeof(int)); checkCUDAError("cudaMalloc dev_B failed!"); cudaMemcpy(device_flipflopA, idata, n * sizeof(int), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); timer().startGpuTimer(); for (int d = 1; d <= ilog2ceil(n); d++) { device_srcBuffer = d % 2 == 0 ? device_flipflopB : device_flipflopA; device_desBuffer = d % 2 == 0 ? device_flipflopA : device_flipflopB; kernIterateScan << <gridSize, blockSize >> > (n, d, device_srcBuffer, device_desBuffer); cudaDeviceSynchronize(); checkCUDAError("Iteration error"); } kernConvertInclusiveToExclusive << <gridSize, blockSize >> > (n, device_desBuffer, device_srcBuffer); cudaDeviceSynchronize(); timer().endGpuTimer(); cudaMemcpy(odata, device_srcBuffer, n * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaFree(device_flipflopA); cudaFree(device_flipflopB); odata[0] = 0; } } }
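// A small CPU reference for checking the GPU result (an editor's sketch, not part of the
// original file): it computes the same exclusive prefix sum as scan() above for any n.
inline void scanCpu(int n, int* odata, const int* idata) {
    int running = 0;
    for (int i = 0; i < n; ++i) {
        odata[i] = running;   // exclusive scan: position i holds the sum of idata[0..i-1]
        running += idata[i];
    }
}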
be950acb35e9eb48f8655f8dd110f9d22c86dbbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #define INSIZE 28 #define INFO_BYTE_SIZE 4 #define INITIAL_WEIGHT_VALUE -1.0f #define INITIAL_FC_WEIGHT_VALUE 1.0f #define IMAGE_WIDTH 28 #define IMAGE_HEIGHT 28 #define CONV_FILTER 5 #define SS_FILTER 4 #define FEATURES 6 #define NEURONS 10 #define CONV_OUTPUT 24 #define SS_OUTPUT 6 #define FC_OUTPUT 10 //kernel function that fill mnist_data structure->data with normalized pixel values __global__ void fillArr(unsigned char pixels[INSIZE][INSIZE], double data[INSIZE][INSIZE]){ // TO DO int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE) data[i][j] = pixels[i][j]/255.0; } //kernel function that changes the values >0 to 1 and double type to integer type __global__ void showArr(double ddata[INSIZE][INSIZE], int dshow[INSIZE][INSIZE]){ // TO DO int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE){ if(ddata[i][j]>0) dshow[i][j] = 1; else dshow[i][j] = 0; } } //mnist data structure typedef struct mnist_data{ double data[INSIZE][INSIZE]; unsigned int label; }mnist_data; //structure for images header information typedef struct images_info{ char magic_num_images[INFO_BYTE_SIZE]; char amount_images[INFO_BYTE_SIZE]; char rows[INFO_BYTE_SIZE]; char columns[INFO_BYTE_SIZE]; }images_info; //structure for labels header information typedef struct labels_info{ char magic_num_labels[INFO_BYTE_SIZE]; char amount_labels[INFO_BYTE_SIZE]; }labels_info; //Hexadecimal to integer static unsigned int mnist_bin_to_int(char *tmp){ int val = (tmp[0] << 24 | tmp[1] << 16 | tmp[2] << 8 | tmp[3] ); return val; } static int mnist_load(const char *image_filename, const char *label_filename, mnist_data **data_set,unsigned int *count){ images_info i_info; labels_info l_info; //opening the files FILE *images = fopen(image_filename,"rb"); FILE *labels = fopen(label_filename,"rb"); if(images==NULL||labels==NULL){ return -1; } //read header info fread(&i_info,sizeof(images_info),1,images); fread(&l_info,sizeof(labels_info),1,labels); //check and print header info int magic_num_images_as_int = mnist_bin_to_int(i_info.magic_num_images); if(magic_num_images_as_int != 2051){ printf("Problems with 'image magic number'. It is equal to %d, but should be 2051.",magic_num_images_as_int); return -1; } else{ printf("image magic number = %d (should be 2051)\n", magic_num_images_as_int); } int magic_num_labels_as_int = mnist_bin_to_int(l_info.magic_num_labels); if(magic_num_labels_as_int != 2049){ printf("Problems with 'label magic number'. It is equal to %d, but should be 2049.",magic_num_labels_as_int); return -1; } else{ printf("label magic number = %d (should be 2049)\n", magic_num_labels_as_int); } int amount_images_as_int = mnist_bin_to_int(i_info.amount_images); if(amount_images_as_int != 10000){ printf("Problems with 'image total number'. It is equal to %d, but should be 10000.",amount_images_as_int); return -1; } else{ printf("image total number = %d (should be 10000)\n", amount_images_as_int); } int amount_labels_as_int = mnist_bin_to_int(l_info.amount_labels); if(amount_labels_as_int != 10000){ printf("Problems with 'label total number'. 
It is equal to %d, but should be 10000.",amount_labels_as_int); return -1; } else{ printf("label total number = %d (should be 10000)\n", amount_labels_as_int); } int rows_as_int = mnist_bin_to_int(i_info.rows); int columns_as_int = mnist_bin_to_int(i_info.columns); if((rows_as_int != 28)||(columns_as_int!=28)){ printf("Problems with dimensions of images. Dimensions of images are not compitable with 28x28."); return -1; } else{ printf("rows = %d, cols = %d (both should be 28)\n", rows_as_int,columns_as_int); } unsigned char pixels[INSIZE][INSIZE]; char label; for(int k = 0;k<10000;k++){ //read current necessary data point fread(pixels,sizeof(pixels),1,images); fread(&label,sizeof(char),1,labels); //fill mnist_data struct -> data array with double values of pixels using cuda unsigned char (*dpixels)[INSIZE]; double (*ddata)[INSIZE]; hipMalloc((void**)&dpixels, INSIZE*INSIZE*sizeof(char)); hipMalloc((void**)&ddata, INSIZE*INSIZE*sizeof(double)); hipMemcpy(dpixels, pixels, INSIZE*INSIZE*sizeof(unsigned char), hipMemcpyHostToDevice); dim3 blocks(1,1); dim3 threads(INSIZE,INSIZE); hipLaunchKernelGGL(( fillArr), dim3(blocks), dim3(threads), 0, 0, dpixels,ddata); hipMemcpy((*data_set+*count)->data, ddata, INSIZE*INSIZE*sizeof(double), hipMemcpyDeviceToHost); hipFree(dpixels); hipFree(ddata); //assign mnist_data struct -> label with label (*data_set+*count)->label = (int)label; //increment count *count+=1; } //close files fclose(images); fclose(labels); return 0; } //Convolution layer. Filtering. __global__ void conv_filtering(float d_data[28][28], float d_weight[6][5][5], float d_filter_output[6][24][24]){ int local_row = threadIdx.y; int local_column = threadIdx.z; int feature = threadIdx.x; int global_row = blockIdx.x+threadIdx.y; int global_column = blockIdx.y+threadIdx.z; int output_row = blockIdx.x; int output_column = blockIdx.y; __shared__ float temp[FEATURES][CONV_FILTER][CONV_FILTER]; __shared__ float pre_sum[FEATURES][CONV_FILTER]; temp[feature][local_row][local_column] = d_data[global_row][global_column]*d_weight[feature][local_row][local_column]; __syncthreads(); if(local_column==0){ float temp_sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ temp_sum+=temp[feature][local_row][i]; } pre_sum[feature][local_row] = temp_sum; __syncthreads(); if(local_row==0){ float sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ sum+=pre_sum[feature][i]; } d_filter_output[feature][output_row][output_column] = sum; } } } //Convolution layer. Biasing. __global__ void conv_biasing(float d_filter_output[6][24][24], float d_bias[6], float d_bias_output[6][24][24]){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int feature = blockIdx.z; d_bias_output[feature][x][y] = d_filter_output[feature][x][y] + d_bias[feature]; } //Convolution layer. Sigmoid. __global__ void conv_sigmoid(float d_bias_output[6][24][24], float d_sigmoid_output[6][24][24]){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int feature = blockIdx.z; d_sigmoid_output[feature][x][y] = 1/(1+expf((-1)*d_bias_output[feature][x][y])); } //SubSampling layer. Filtering. 
__global__ void ss_filtering(float d_conv_output[6][24][24], float d_weight[4][4], float d_filter_output[6][6][6]){ int local_row = threadIdx.y; int local_column = threadIdx.z; int feature = threadIdx.x; int global_row = blockIdx.x*blockDim.y+threadIdx.y; int global_column = blockIdx.y*blockDim.z+threadIdx.z; int output_row = blockIdx.x; int output_column = blockIdx.y; __shared__ float temp[FEATURES][SS_FILTER][SS_FILTER]; temp[feature][local_row][local_column] = d_conv_output[feature][global_row][global_column]*d_weight[local_row][local_column]; __syncthreads(); if(local_row==0 && local_column==0){ float sum = 0.0f; for(int i = 0; i<SS_FILTER; i++){ for(int j =0; j<SS_FILTER; j++){ sum+=temp[feature][i][j]; } } d_filter_output[feature][output_row][output_column] = sum; } } //SubSampling layer. Biasing. __global__ void ss_biasing(float d_filter_output[6][6][6], float d_bias[1], float d_bias_output[6][6][6]){ int x = threadIdx.x; int y = threadIdx.y; int feature = blockIdx.x; d_bias_output[feature][x][y] = d_filter_output[feature][x][y] + d_bias[0]; } //SubSampling layer. Sigmoid. __global__ void ss_sigmoid(float d_bias_output[6][6][6], float d_sigmoid_output[6][6][6]){ int x = threadIdx.x; int y = threadIdx.y; int feature = blockIdx.x; d_sigmoid_output[feature][x][y] = 1/(1+expf((-1)*d_bias_output[feature][x][y])); } __global__ void fc_linear(float d_ss_output[6][6][6], float d_weight[10][6][6][6],float d_linear_output[10]){ const int neuron = blockIdx.x; const int depth = blockIdx.y*blockDim.x+threadIdx.x; const int local_depth = threadIdx.x; const int row = threadIdx.y; const int column = threadIdx.z; __shared__ float temp[3][6][6]; __shared__ float temp_sums[3][6]; __shared__ float pre_sums[3]; temp[local_depth][row][column] = d_ss_output[depth][row][column]*d_weight[neuron][depth][row][column]; __syncthreads(); if(column==0){ float temp_sum = 0.0f; for(int i = 0; i<6;i++){ temp_sum+=temp[local_depth][row][i]; } temp_sums[local_depth][row] = temp_sum; if(row==0){ float pre_sum = 0.0f; for(int i = 0; i<6;i++){ pre_sum+=temp_sums[local_depth][i]; } pre_sums[local_depth] = pre_sum; if(local_depth==0){ float sum = 0.0f; for(int i = 0; i<3;i++){ sum+=pre_sums[i]; } atomicAdd(&d_linear_output[neuron],sum); } } } } //Fully-connected layer.Biasing. __global__ void fc_biasing(float d_linear_output[10], float d_bias[10],float d_bias_output[10]){ const int idx = threadIdx.x; d_bias_output[idx] = d_linear_output[idx]+d_bias[idx]; } //Fully-connected layer.Sigmoid. 
__global__ void fc_sigmoid(float d_bias_output[10], float d_final_output[10]){ const int idx = threadIdx.x; d_final_output[idx] = 1/(1+expf((-1)*d_bias_output[idx])); } class Conv{ public: int filter_size, features_num, output_dim; float *weight, *bias,*filter_output, *bias_output, *final_output; Conv(int filter_size, int features_num, int output); void forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]); void get_filter_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_bias_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_final_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); ~Conv(); }; Conv::Conv(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; float w[features_num][filter_size][filter_size],b[features_num]; //Assigning all values of 'weight' and 'bias' to -1.0f for(int i = 0; i < features_num; i++){ b[i] = INITIAL_WEIGHT_VALUE; for(int j = 0; j < filter_size; j++){ for(int k = 0; k < filter_size; k++){ w[i][j][k] = INITIAL_WEIGHT_VALUE; } } } //CUDA memory allocation hipMalloc((void **)&weight, features_num*filter_size*filter_size*sizeof(float)); hipMemcpy(weight, w, features_num*filter_size*filter_size*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, features_num*sizeof(float)); hipMemcpy(bias, b, features_num*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&filter_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&bias_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&final_output, features_num*output_dim*output_dim*sizeof(float)); } Conv::~Conv(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(filter_output); hipFree(bias_output); hipFree(final_output); } class SS{ public: int filter_size, features_num, output_dim; float *weight, *bias,*filter_output, *bias_output, *final_output; SS(int filter_size, int features_num, int output); void forward_pass(float conv_output[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_filter_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_bias_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_final_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); ~SS(); }; SS::SS(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; float w[filter_size][filter_size],b[1]; //Assigning all values of 'weight' and 'bias' to -1.0f b[0] = INITIAL_WEIGHT_VALUE; for(int i = 0; i < filter_size; i++){ for(int j= 0; j < filter_size; j++){ w[i][j] = INITIAL_WEIGHT_VALUE; } } //CUDA memory allocation hipMalloc((void **)&weight, filter_size*filter_size*sizeof(float)); hipMemcpy(weight, w, filter_size*filter_size*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, filter_size*filter_size*sizeof(float)); hipMemcpy(bias, b, sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&filter_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&bias_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&final_output, features_num*output_dim*output_dim*sizeof(float)); } SS::~SS(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(filter_output); hipFree(bias_output); hipFree(final_output); } class FC{ public: int neurons, output_dim; float *weight, *bias,*linear_output, *bias_output, *final_output; FC(int neurons, 
int output); void forward_pass(float ss_output[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_linear_output(float res[FC_OUTPUT]); void get_bias_output(float res[FC_OUTPUT]); void get_final_output(float res[FC_OUTPUT]); ~FC(); }; FC::FC(int neurons, int output_dim){ //Assigning attributes this->neurons = neurons; this->output_dim = output_dim; float w[neurons][FEATURES][SS_OUTPUT][SS_OUTPUT],b[neurons],l_o[output_dim]; //Assigning all values of 'weight' and 'bias' to 1.0f for(int i = 0; i < neurons; i++){ b[i] = INITIAL_FC_WEIGHT_VALUE; l_o[i] = 0.0f; for(int j= 0; j < FEATURES; j++){ for(int k= 0; k < SS_OUTPUT; k++){ for(int l= 0; l < SS_OUTPUT; l++){ w[i][j][k][l] = INITIAL_FC_WEIGHT_VALUE; } } } } //CUDA memory allocation hipMalloc((void **)&weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float)); hipMemcpy(weight, w, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, neurons*sizeof(float)); hipMemcpy(bias, b, neurons*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&linear_output, output_dim*sizeof(float)); hipMemcpy(linear_output, l_o, output_dim*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias_output, output_dim*sizeof(float)); hipMalloc((void **)&final_output, output_dim*sizeof(float)); } FC::~FC(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(linear_output); hipFree(bias_output); hipFree(final_output); } //MaxError checker for convolution layer, prints "correct results" if everything is OK void conv_maxError(float arr[FEATURES][CONV_OUTPUT][CONV_OUTPUT],const char type[]){ float maxError = 0.0f; int i,j,k,stop = 0; float checker; if(strcmp(type,"conv_filtering") == 0){ checker = 25.0f; } else if(strcmp(type,"conv_biasing") == 0){ checker = 24.0f; } else if(strcmp(type,"conv_sigmoid") == 0){ checker = 1/(1+expf((-1)*(24))); } else{ printf("Problems with array type to check."); exit(1); } for(i=0; i<FEATURES; i++){ for(j=0; j<CONV_OUTPUT; j++){ for(k=0; k<CONV_OUTPUT; k++){ maxError = max(maxError, abs(arr[i][j][k] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); stop = 1; break; } } if(stop==1) break; } if(stop==1) break; } if(i==FEATURES&&j==CONV_OUTPUT&&k==CONV_OUTPUT) printf("Correct %s results\n", type); } //MaxError checker for subsampling layer, prints "correct results" if everything is OK void ss_maxError(float arr[FEATURES][SS_OUTPUT][SS_OUTPUT],const char type[]){ float maxError = 0.0f; int i,j,k,stop = 0; float checker; if(strcmp(type,"ss_filtering") == 0){ checker = -16.0f; } else if(strcmp(type,"ss_biasing") == 0){ checker = -17.0f; } else if(strcmp(type,"ss_sigmoid") == 0){ checker = 1/(1+expf((-1)*(-17))); } else{ printf("Problems with array type to check."); exit(1); } for(i=0; i<FEATURES; i++){ for(j=0; j<SS_OUTPUT; j++){ for(k=0; k<SS_OUTPUT; k++){ maxError = max(maxError,abs(arr[i][j][k] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); stop = 1; break; } } if(stop==1) break; } if(stop==1) break; } if(i==FEATURES&&j==SS_OUTPUT&&k==SS_OUTPUT) printf("Correct %s results\n", type); } //MaxError checker for fully-connected layer, prints "correct results" if everything is OK void fc_maxError(float arr[NEURONS],const char type[]){ float maxError = 0.0f; int i = 0; float checker; if(strcmp(type,"fc_linear") == 0){ checker = 216.0f; } else if(strcmp(type,"fc_biasing") == 0){ checker = 217.0f; } else if(strcmp(type,"fc_sigmoid") == 0){ checker = 1/(1+expf((-1)*(217))); } else{ printf("Problems with array type to check."); exit(1); 
} for(i=0; i<NEURONS; i++){ maxError = max(maxError, abs(arr[i] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); break; } } if(i==NEURONS) printf("Correct %s results\n", type); } //Forward pass void forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]){ Conv conv = Conv(CONV_FILTER, FEATURES, CONV_OUTPUT); SS ss = SS(SS_FILTER, FEATURES, SS_OUTPUT); FC fc = FC(NEURONS, FC_OUTPUT); float (*kernel_data)[IMAGE_HEIGHT]; hipMalloc((void**)&kernel_data,IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float)); hipMemcpy(kernel_data, data, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float), hipMemcpyHostToDevice); dim3 conv_filter_blocks(CONV_OUTPUT, CONV_OUTPUT); dim3 conv_filter_thread(FEATURES, CONV_FILTER, CONV_FILTER); hipLaunchKernelGGL(( conv_filtering), dim3(conv_filter_blocks), dim3(conv_filter_thread), 0, 0, kernel_data, (float (*)[CONV_FILTER][CONV_FILTER])conv.weight, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.filter_output); hipError_t conv_filter_checker = hipGetLastError(); if (conv_filter_checker!=hipSuccess){ printf("CONV FILTERING PROBLEM:: %s", hipGetErrorString(conv_filter_checker)); exit(1); } int conv_block_dim = CONV_OUTPUT/3; dim3 conv_bias_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_bias_thread(conv_block_dim,conv_block_dim); hipLaunchKernelGGL(( conv_biasing), dim3(conv_bias_blocks), dim3(conv_bias_thread), 0, 0, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.filter_output, conv.bias, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.bias_output); hipError_t conv_bias_checker = hipGetLastError(); if (conv_bias_checker!=hipSuccess){ printf("CONV BIASING PROBLEM:: %s", hipGetErrorString(conv_bias_checker)); exit(1); } dim3 conv_sigmoid_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_sigmoid_thread(conv_block_dim,conv_block_dim); hipLaunchKernelGGL(( conv_sigmoid), dim3(conv_sigmoid_blocks), dim3(conv_sigmoid_thread), 0, 0, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.bias_output, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.final_output); hipError_t conv_sigmoid_checker = hipGetLastError(); if (conv_sigmoid_checker!=hipSuccess){ printf("CONV SIGMOID PROBLEM:: %s", hipGetErrorString(conv_sigmoid_checker)); exit(1); } dim3 ss_filter_blocks(SS_OUTPUT, SS_OUTPUT); dim3 ss_filter_thread(FEATURES, SS_FILTER, SS_FILTER); hipLaunchKernelGGL(( ss_filtering), dim3(ss_filter_blocks), dim3(ss_filter_thread), 0, 0, (float (*)[24][24])conv.final_output, (float (*)[SS_FILTER])ss.weight, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.filter_output); hipError_t ss_filter_checker = hipGetLastError(); if (ss_filter_checker!=hipSuccess){ printf("SS FILTERING PROBLEM:: %s", hipGetErrorString(ss_filter_checker)); exit(1); } dim3 ss_bias_blocks(FEATURES); dim3 ss_bias_thread(SS_OUTPUT,SS_OUTPUT); hipLaunchKernelGGL(( ss_biasing), dim3(ss_bias_blocks), dim3(ss_bias_thread), 0, 0, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.filter_output, (float (*))ss.bias, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.bias_output); hipError_t ss_bias_checker = hipGetLastError(); if (ss_bias_checker!=hipSuccess){ printf("SS BIASING PROBLEM:: %s", hipGetErrorString(ss_bias_checker)); exit(1); } dim3 ss_sigmoid_blocks(FEATURES); dim3 ss_sigmoid_thread(SS_OUTPUT,SS_OUTPUT); hipLaunchKernelGGL(( ss_sigmoid), dim3(ss_sigmoid_blocks), dim3(ss_sigmoid_thread), 0, 0, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.bias_output, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.final_output); hipError_t ss_sigmoid_checker = hipGetLastError(); if (ss_sigmoid_checker!=hipSuccess){ printf("SS SIGMOID PROBLEM:: %s", 
hipGetErrorString(ss_sigmoid_checker)); exit(1); } float fc_data[6][6][6]; for(int i = 0; i<6;i++){ for(int j = 0; j<6;j++){ for(int k = 0; k<6;k++){ fc_data[i][j][k] = 1.0f; } } } float (*d_fc_data)[6][6]; hipMalloc((void**)&d_fc_data,6*6*6*sizeof(float)); hipMemcpy(d_fc_data, fc_data, 6*6*6*sizeof(float), hipMemcpyHostToDevice); int div = FEATURES/2; dim3 fc_linear_blocks(FC_OUTPUT, FEATURES/div); dim3 fc_linear_thread(div, SS_OUTPUT, SS_OUTPUT); hipLaunchKernelGGL(( fc_linear), dim3(fc_linear_blocks), dim3(fc_linear_thread), 0, 0, d_fc_data, (float (*)[FEATURES][SS_OUTPUT][SS_OUTPUT])fc.weight, fc.linear_output); hipError_t fc_linear_checker = hipGetLastError(); if (fc_linear_checker!=hipSuccess){ printf("FC LINEAR PROBLEM:: %s", hipGetErrorString(fc_linear_checker)); exit(1); } dim3 fc_bias_blocks(1); dim3 fc_bias_thread(NEURONS); hipLaunchKernelGGL(( fc_biasing), dim3(fc_bias_blocks), dim3(fc_bias_thread), 0, 0, fc.linear_output, fc.bias, fc.bias_output); hipError_t fc_bias_checker = hipGetLastError(); if (fc_bias_checker!=hipSuccess){ printf("FC BIASING PROBLEM:: %s", hipGetErrorString(fc_bias_checker)); exit(1); } dim3 fc_sigmoid_blocks(1); dim3 fc_sigmoid_thread(NEURONS); hipLaunchKernelGGL(( fc_sigmoid), dim3(fc_sigmoid_blocks), dim3(fc_sigmoid_thread), 0, 0, fc.bias_output, fc.final_output); hipError_t fc_sigmoid_checker = hipGetLastError(); if (fc_sigmoid_checker!=hipSuccess){ printf("FC SIGMOID PROBLEM:: %s", hipGetErrorString(fc_sigmoid_checker)); exit(1); } hipFree(kernel_data); float conv_filter_res[6][24][24],conv_bias_res[6][24][24],conv_final_res[6][24][24], ss_filter_res[6][6][6],ss_bias_res[6][6][6],ss_final_res[6][6][6], fc_linear_res[10],fc_bias_res[10],fc_final_res[10]; hipMemcpy(conv_filter_res, conv.filter_output, 6*24*24*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(conv_bias_res, conv.bias_output, 6*24*24*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(conv_final_res, conv.final_output, 6*24*24*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(ss_filter_res, ss.filter_output, 6*6*6*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(ss_bias_res, ss.bias_output, 6*6*6*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(ss_final_res, ss.final_output, 6*6*6*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(fc_linear_res, fc.linear_output, 10*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(fc_bias_res, fc.bias_output, 10*sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(fc_final_res, fc.final_output, 10*sizeof(float), hipMemcpyDeviceToHost); conv_maxError(conv_filter_res,"conv_filtering"); conv_maxError(conv_bias_res,"conv_biasing"); conv_maxError(conv_final_res,"conv_sigmoid"); ss_maxError(ss_filter_res,"ss_filtering"); ss_maxError(ss_bias_res,"ss_biasing"); ss_maxError(ss_final_res,"ss_sigmoid"); fc_maxError(fc_linear_res,"fc_linear"); fc_maxError(fc_bias_res,"fc_biasing"); fc_maxError(fc_final_res,"fc_sigmoid"); } int main(){ const char *image_filename = "data/t10k-images.idx3-ubyte"; const char *label_filename = "data/t10k-labels.idx1-ubyte"; mnist_data *data_set = (mnist_data *)malloc(sizeof(*data_set)*10000); unsigned int count = 0; if(mnist_load(image_filename,label_filename, &data_set,&count)!=0){ printf("Problems with loading data."); exit(1); } printf("test_cnt = %d (should be 10000)\n\n",count); float data[IMAGE_HEIGHT][IMAGE_WIDTH]; for(int i = 0; i< IMAGE_HEIGHT;i++){ for(int j = 0; j< IMAGE_WIDTH;j++){ data_set[0].data[i][j] = -1.0; data[i][j] = data_set[0].data[i][j]; } } forward_pass(data); return 0; }
be950acb35e9eb48f8655f8dd110f9d22c86dbbd.cu
#include <stdio.h> #define INSIZE 28 #define INFO_BYTE_SIZE 4 #define INITIAL_WEIGHT_VALUE -1.0f #define INITIAL_FC_WEIGHT_VALUE 1.0f #define IMAGE_WIDTH 28 #define IMAGE_HEIGHT 28 #define CONV_FILTER 5 #define SS_FILTER 4 #define FEATURES 6 #define NEURONS 10 #define CONV_OUTPUT 24 #define SS_OUTPUT 6 #define FC_OUTPUT 10 //kernel function that fill mnist_data structure->data with normalized pixel values __global__ void fillArr(unsigned char pixels[INSIZE][INSIZE], double data[INSIZE][INSIZE]){ // TO DO int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE) data[i][j] = pixels[i][j]/255.0; } //kernel function that changes the values >0 to 1 and double type to integer type __global__ void showArr(double ddata[INSIZE][INSIZE], int dshow[INSIZE][INSIZE]){ // TO DO int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE){ if(ddata[i][j]>0) dshow[i][j] = 1; else dshow[i][j] = 0; } } //mnist data structure typedef struct mnist_data{ double data[INSIZE][INSIZE]; unsigned int label; }mnist_data; //structure for images header information typedef struct images_info{ char magic_num_images[INFO_BYTE_SIZE]; char amount_images[INFO_BYTE_SIZE]; char rows[INFO_BYTE_SIZE]; char columns[INFO_BYTE_SIZE]; }images_info; //structure for labels header information typedef struct labels_info{ char magic_num_labels[INFO_BYTE_SIZE]; char amount_labels[INFO_BYTE_SIZE]; }labels_info; //Hexadecimal to integer static unsigned int mnist_bin_to_int(char *tmp){ int val = (tmp[0] << 24 | tmp[1] << 16 | tmp[2] << 8 | tmp[3] ); return val; } static int mnist_load(const char *image_filename, const char *label_filename, mnist_data **data_set,unsigned int *count){ images_info i_info; labels_info l_info; //opening the files FILE *images = fopen(image_filename,"rb"); FILE *labels = fopen(label_filename,"rb"); if(images==NULL||labels==NULL){ return -1; } //read header info fread(&i_info,sizeof(images_info),1,images); fread(&l_info,sizeof(labels_info),1,labels); //check and print header info int magic_num_images_as_int = mnist_bin_to_int(i_info.magic_num_images); if(magic_num_images_as_int != 2051){ printf("Problems with 'image magic number'. It is equal to %d, but should be 2051.",magic_num_images_as_int); return -1; } else{ printf("image magic number = %d (should be 2051)\n", magic_num_images_as_int); } int magic_num_labels_as_int = mnist_bin_to_int(l_info.magic_num_labels); if(magic_num_labels_as_int != 2049){ printf("Problems with 'label magic number'. It is equal to %d, but should be 2049.",magic_num_labels_as_int); return -1; } else{ printf("label magic number = %d (should be 2049)\n", magic_num_labels_as_int); } int amount_images_as_int = mnist_bin_to_int(i_info.amount_images); if(amount_images_as_int != 10000){ printf("Problems with 'image total number'. It is equal to %d, but should be 10000.",amount_images_as_int); return -1; } else{ printf("image total number = %d (should be 10000)\n", amount_images_as_int); } int amount_labels_as_int = mnist_bin_to_int(l_info.amount_labels); if(amount_labels_as_int != 10000){ printf("Problems with 'label total number'. 
It is equal to %d, but should be 10000.",amount_labels_as_int); return -1; } else{ printf("label total number = %d (should be 10000)\n", amount_labels_as_int); } int rows_as_int = mnist_bin_to_int(i_info.rows); int columns_as_int = mnist_bin_to_int(i_info.columns); if((rows_as_int != 28)||(columns_as_int!=28)){ printf("Problems with dimensions of images. Dimensions of images are not compitable with 28x28."); return -1; } else{ printf("rows = %d, cols = %d (both should be 28)\n", rows_as_int,columns_as_int); } unsigned char pixels[INSIZE][INSIZE]; char label; for(int k = 0;k<10000;k++){ //read current necessary data point fread(pixels,sizeof(pixels),1,images); fread(&label,sizeof(char),1,labels); //fill mnist_data struct -> data array with double values of pixels using cuda unsigned char (*dpixels)[INSIZE]; double (*ddata)[INSIZE]; cudaMalloc((void**)&dpixels, INSIZE*INSIZE*sizeof(char)); cudaMalloc((void**)&ddata, INSIZE*INSIZE*sizeof(double)); cudaMemcpy(dpixels, pixels, INSIZE*INSIZE*sizeof(unsigned char), cudaMemcpyHostToDevice); dim3 blocks(1,1); dim3 threads(INSIZE,INSIZE); fillArr<<<blocks, threads>>>(dpixels,ddata); cudaMemcpy((*data_set+*count)->data, ddata, INSIZE*INSIZE*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(dpixels); cudaFree(ddata); //assign mnist_data struct -> label with label (*data_set+*count)->label = (int)label; //increment count *count+=1; } //close files fclose(images); fclose(labels); return 0; } //Convolution layer. Filtering. __global__ void conv_filtering(float d_data[28][28], float d_weight[6][5][5], float d_filter_output[6][24][24]){ int local_row = threadIdx.y; int local_column = threadIdx.z; int feature = threadIdx.x; int global_row = blockIdx.x+threadIdx.y; int global_column = blockIdx.y+threadIdx.z; int output_row = blockIdx.x; int output_column = blockIdx.y; __shared__ float temp[FEATURES][CONV_FILTER][CONV_FILTER]; __shared__ float pre_sum[FEATURES][CONV_FILTER]; temp[feature][local_row][local_column] = d_data[global_row][global_column]*d_weight[feature][local_row][local_column]; __syncthreads(); if(local_column==0){ float temp_sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ temp_sum+=temp[feature][local_row][i]; } pre_sum[feature][local_row] = temp_sum; __syncthreads(); if(local_row==0){ float sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ sum+=pre_sum[feature][i]; } d_filter_output[feature][output_row][output_column] = sum; } } } //Convolution layer. Biasing. __global__ void conv_biasing(float d_filter_output[6][24][24], float d_bias[6], float d_bias_output[6][24][24]){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int feature = blockIdx.z; d_bias_output[feature][x][y] = d_filter_output[feature][x][y] + d_bias[feature]; } //Convolution layer. Sigmoid. __global__ void conv_sigmoid(float d_bias_output[6][24][24], float d_sigmoid_output[6][24][24]){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int feature = blockIdx.z; d_sigmoid_output[feature][x][y] = 1/(1+expf((-1)*d_bias_output[feature][x][y])); } //SubSampling layer. Filtering. 
__global__ void ss_filtering(float d_conv_output[6][24][24], float d_weight[4][4], float d_filter_output[6][6][6]){ int local_row = threadIdx.y; int local_column = threadIdx.z; int feature = threadIdx.x; int global_row = blockIdx.x*blockDim.y+threadIdx.y; int global_column = blockIdx.y*blockDim.z+threadIdx.z; int output_row = blockIdx.x; int output_column = blockIdx.y; __shared__ float temp[FEATURES][SS_FILTER][SS_FILTER]; temp[feature][local_row][local_column] = d_conv_output[feature][global_row][global_column]*d_weight[local_row][local_column]; __syncthreads(); if(local_row==0 && local_column==0){ float sum = 0.0f; for(int i = 0; i<SS_FILTER; i++){ for(int j =0; j<SS_FILTER; j++){ sum+=temp[feature][i][j]; } } d_filter_output[feature][output_row][output_column] = sum; } } //SubSampling layer. Biasing. __global__ void ss_biasing(float d_filter_output[6][6][6], float d_bias[1], float d_bias_output[6][6][6]){ int x = threadIdx.x; int y = threadIdx.y; int feature = blockIdx.x; d_bias_output[feature][x][y] = d_filter_output[feature][x][y] + d_bias[0]; } //SubSampling layer. Sigmoid. __global__ void ss_sigmoid(float d_bias_output[6][6][6], float d_sigmoid_output[6][6][6]){ int x = threadIdx.x; int y = threadIdx.y; int feature = blockIdx.x; d_sigmoid_output[feature][x][y] = 1/(1+expf((-1)*d_bias_output[feature][x][y])); } __global__ void fc_linear(float d_ss_output[6][6][6], float d_weight[10][6][6][6],float d_linear_output[10]){ const int neuron = blockIdx.x; const int depth = blockIdx.y*blockDim.x+threadIdx.x; const int local_depth = threadIdx.x; const int row = threadIdx.y; const int column = threadIdx.z; __shared__ float temp[3][6][6]; __shared__ float temp_sums[3][6]; __shared__ float pre_sums[3]; temp[local_depth][row][column] = d_ss_output[depth][row][column]*d_weight[neuron][depth][row][column]; __syncthreads(); if(column==0){ float temp_sum = 0.0f; for(int i = 0; i<6;i++){ temp_sum+=temp[local_depth][row][i]; } temp_sums[local_depth][row] = temp_sum; if(row==0){ float pre_sum = 0.0f; for(int i = 0; i<6;i++){ pre_sum+=temp_sums[local_depth][i]; } pre_sums[local_depth] = pre_sum; if(local_depth==0){ float sum = 0.0f; for(int i = 0; i<3;i++){ sum+=pre_sums[i]; } atomicAdd(&d_linear_output[neuron],sum); } } } } //Fully-connected layer.Biasing. __global__ void fc_biasing(float d_linear_output[10], float d_bias[10],float d_bias_output[10]){ const int idx = threadIdx.x; d_bias_output[idx] = d_linear_output[idx]+d_bias[idx]; } //Fully-connected layer.Sigmoid. 
__global__ void fc_sigmoid(float d_bias_output[10], float d_final_output[10]){ const int idx = threadIdx.x; d_final_output[idx] = 1/(1+expf((-1)*d_bias_output[idx])); } class Conv{ public: int filter_size, features_num, output_dim; float *weight, *bias,*filter_output, *bias_output, *final_output; Conv(int filter_size, int features_num, int output); void forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]); void get_filter_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_bias_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_final_output(float res[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); ~Conv(); }; Conv::Conv(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; float w[features_num][filter_size][filter_size],b[features_num]; //Assigning all values of 'weight' and 'bias' to -1.0f for(int i = 0; i < features_num; i++){ b[i] = INITIAL_WEIGHT_VALUE; for(int j = 0; j < filter_size; j++){ for(int k = 0; k < filter_size; k++){ w[i][j][k] = INITIAL_WEIGHT_VALUE; } } } //CUDA memory allocation cudaMalloc((void **)&weight, features_num*filter_size*filter_size*sizeof(float)); cudaMemcpy(weight, w, features_num*filter_size*filter_size*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, features_num*sizeof(float)); cudaMemcpy(bias, b, features_num*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&filter_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&bias_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&final_output, features_num*output_dim*output_dim*sizeof(float)); } Conv::~Conv(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(filter_output); cudaFree(bias_output); cudaFree(final_output); } class SS{ public: int filter_size, features_num, output_dim; float *weight, *bias,*filter_output, *bias_output, *final_output; SS(int filter_size, int features_num, int output); void forward_pass(float conv_output[FEATURES][CONV_OUTPUT][CONV_OUTPUT]); void get_filter_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_bias_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_final_output(float res[FEATURES][SS_OUTPUT][SS_OUTPUT]); ~SS(); }; SS::SS(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; float w[filter_size][filter_size],b[1]; //Assigning all values of 'weight' and 'bias' to -1.0f b[0] = INITIAL_WEIGHT_VALUE; for(int i = 0; i < filter_size; i++){ for(int j= 0; j < filter_size; j++){ w[i][j] = INITIAL_WEIGHT_VALUE; } } //CUDA memory allocation cudaMalloc((void **)&weight, filter_size*filter_size*sizeof(float)); cudaMemcpy(weight, w, filter_size*filter_size*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, filter_size*filter_size*sizeof(float)); cudaMemcpy(bias, b, sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&filter_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&bias_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&final_output, features_num*output_dim*output_dim*sizeof(float)); } SS::~SS(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(filter_output); cudaFree(bias_output); cudaFree(final_output); } class FC{ public: int neurons, output_dim; float *weight, *bias,*linear_output, *bias_output, 
*final_output; FC(int neurons, int output); void forward_pass(float ss_output[FEATURES][SS_OUTPUT][SS_OUTPUT]); void get_linear_output(float res[FC_OUTPUT]); void get_bias_output(float res[FC_OUTPUT]); void get_final_output(float res[FC_OUTPUT]); ~FC(); }; FC::FC(int neurons, int output_dim){ //Assigning attributes this->neurons = neurons; this->output_dim = output_dim; float w[neurons][FEATURES][SS_OUTPUT][SS_OUTPUT],b[neurons],l_o[output_dim]; //Assigning all values of 'weight' and 'bias' to 1.0f for(int i = 0; i < neurons; i++){ b[i] = INITIAL_FC_WEIGHT_VALUE; l_o[i] = 0.0f; for(int j= 0; j < FEATURES; j++){ for(int k= 0; k < SS_OUTPUT; k++){ for(int l= 0; l < SS_OUTPUT; l++){ w[i][j][k][l] = INITIAL_FC_WEIGHT_VALUE; } } } } //CUDA memory allocation cudaMalloc((void **)&weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float)); cudaMemcpy(weight, w, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, neurons*sizeof(float)); cudaMemcpy(bias, b, neurons*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&linear_output, output_dim*sizeof(float)); cudaMemcpy(linear_output, l_o, output_dim*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias_output, output_dim*sizeof(float)); cudaMalloc((void **)&final_output, output_dim*sizeof(float)); } FC::~FC(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(linear_output); cudaFree(bias_output); cudaFree(final_output); } //MaxError checker for convolution layer, prints "correct results" if everything is OK void conv_maxError(float arr[FEATURES][CONV_OUTPUT][CONV_OUTPUT],const char type[]){ float maxError = 0.0f; int i,j,k,stop = 0; float checker; if(strcmp(type,"conv_filtering") == 0){ checker = 25.0f; } else if(strcmp(type,"conv_biasing") == 0){ checker = 24.0f; } else if(strcmp(type,"conv_sigmoid") == 0){ checker = 1/(1+expf((-1)*(24))); } else{ printf("Problems with array type to check."); exit(1); } for(i=0; i<FEATURES; i++){ for(j=0; j<CONV_OUTPUT; j++){ for(k=0; k<CONV_OUTPUT; k++){ maxError = max(maxError, abs(arr[i][j][k] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); stop = 1; break; } } if(stop==1) break; } if(stop==1) break; } if(i==FEATURES&&j==CONV_OUTPUT&&k==CONV_OUTPUT) printf("Correct %s results\n", type); } //MaxError checker for subsampling layer, prints "correct results" if everything is OK void ss_maxError(float arr[FEATURES][SS_OUTPUT][SS_OUTPUT],const char type[]){ float maxError = 0.0f; int i,j,k,stop = 0; float checker; if(strcmp(type,"ss_filtering") == 0){ checker = -16.0f; } else if(strcmp(type,"ss_biasing") == 0){ checker = -17.0f; } else if(strcmp(type,"ss_sigmoid") == 0){ checker = 1/(1+expf((-1)*(-17))); } else{ printf("Problems with array type to check."); exit(1); } for(i=0; i<FEATURES; i++){ for(j=0; j<SS_OUTPUT; j++){ for(k=0; k<SS_OUTPUT; k++){ maxError = max(maxError,abs(arr[i][j][k] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); stop = 1; break; } } if(stop==1) break; } if(stop==1) break; } if(i==FEATURES&&j==SS_OUTPUT&&k==SS_OUTPUT) printf("Correct %s results\n", type); } //MaxError checker for fully-connected layer, prints "correct results" if everything is OK void fc_maxError(float arr[NEURONS],const char type[]){ float maxError = 0.0f; int i = 0; float checker; if(strcmp(type,"fc_linear") == 0){ checker = 216.0f; } else if(strcmp(type,"fc_biasing") == 0){ checker = 217.0f; } else if(strcmp(type,"fc_sigmoid") == 0){ checker = 1/(1+expf((-1)*(217))); } else{ 
printf("Problems with array type to check."); exit(1); } for(i=0; i<NEURONS; i++){ maxError = max(maxError, abs(arr[i] - checker)); if(maxError!=0.0f){ printf("Incorrect %s results\n", type); break; } } if(i==NEURONS) printf("Correct %s results\n", type); } //Forward pass void forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]){ Conv conv = Conv(CONV_FILTER, FEATURES, CONV_OUTPUT); SS ss = SS(SS_FILTER, FEATURES, SS_OUTPUT); FC fc = FC(NEURONS, FC_OUTPUT); float (*kernel_data)[IMAGE_HEIGHT]; cudaMalloc((void**)&kernel_data,IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float)); cudaMemcpy(kernel_data, data, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float), cudaMemcpyHostToDevice); dim3 conv_filter_blocks(CONV_OUTPUT, CONV_OUTPUT); dim3 conv_filter_thread(FEATURES, CONV_FILTER, CONV_FILTER); conv_filtering<<<conv_filter_blocks, conv_filter_thread>>>(kernel_data, (float (*)[CONV_FILTER][CONV_FILTER])conv.weight, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.filter_output); cudaError_t conv_filter_checker = cudaGetLastError(); if (conv_filter_checker!=cudaSuccess){ printf("CONV FILTERING PROBLEM:: %s", cudaGetErrorString(conv_filter_checker)); exit(1); } int conv_block_dim = CONV_OUTPUT/3; dim3 conv_bias_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_bias_thread(conv_block_dim,conv_block_dim); conv_biasing<<<conv_bias_blocks, conv_bias_thread>>>((float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.filter_output, conv.bias, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.bias_output); cudaError_t conv_bias_checker = cudaGetLastError(); if (conv_bias_checker!=cudaSuccess){ printf("CONV BIASING PROBLEM:: %s", cudaGetErrorString(conv_bias_checker)); exit(1); } dim3 conv_sigmoid_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_sigmoid_thread(conv_block_dim,conv_block_dim); conv_sigmoid<<<conv_sigmoid_blocks, conv_sigmoid_thread>>>((float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.bias_output, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.final_output); cudaError_t conv_sigmoid_checker = cudaGetLastError(); if (conv_sigmoid_checker!=cudaSuccess){ printf("CONV SIGMOID PROBLEM:: %s", cudaGetErrorString(conv_sigmoid_checker)); exit(1); } dim3 ss_filter_blocks(SS_OUTPUT, SS_OUTPUT); dim3 ss_filter_thread(FEATURES, SS_FILTER, SS_FILTER); ss_filtering<<<ss_filter_blocks, ss_filter_thread>>>((float (*)[24][24])conv.final_output, (float (*)[SS_FILTER])ss.weight, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.filter_output); cudaError_t ss_filter_checker = cudaGetLastError(); if (ss_filter_checker!=cudaSuccess){ printf("SS FILTERING PROBLEM:: %s", cudaGetErrorString(ss_filter_checker)); exit(1); } dim3 ss_bias_blocks(FEATURES); dim3 ss_bias_thread(SS_OUTPUT,SS_OUTPUT); ss_biasing<<<ss_bias_blocks, ss_bias_thread>>>((float (*)[SS_OUTPUT][SS_OUTPUT])ss.filter_output, (float (*))ss.bias, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.bias_output); cudaError_t ss_bias_checker = cudaGetLastError(); if (ss_bias_checker!=cudaSuccess){ printf("SS BIASING PROBLEM:: %s", cudaGetErrorString(ss_bias_checker)); exit(1); } dim3 ss_sigmoid_blocks(FEATURES); dim3 ss_sigmoid_thread(SS_OUTPUT,SS_OUTPUT); ss_sigmoid<<<ss_sigmoid_blocks, ss_sigmoid_thread>>>((float (*)[SS_OUTPUT][SS_OUTPUT])ss.bias_output, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.final_output); cudaError_t ss_sigmoid_checker = cudaGetLastError(); if (ss_sigmoid_checker!=cudaSuccess){ printf("SS SIGMOID PROBLEM:: %s", cudaGetErrorString(ss_sigmoid_checker)); exit(1); } float fc_data[6][6][6]; for(int i = 0; i<6;i++){ for(int j = 0; j<6;j++){ for(int k = 0; k<6;k++){ 
fc_data[i][j][k] = 1.0f; } } } float (*d_fc_data)[6][6]; cudaMalloc((void**)&d_fc_data,6*6*6*sizeof(float)); cudaMemcpy(d_fc_data, fc_data, 6*6*6*sizeof(float), cudaMemcpyHostToDevice); int div = FEATURES/2; dim3 fc_linear_blocks(FC_OUTPUT, FEATURES/div); dim3 fc_linear_thread(div, SS_OUTPUT, SS_OUTPUT); fc_linear<<<fc_linear_blocks, fc_linear_thread>>>(d_fc_data, (float (*)[FEATURES][SS_OUTPUT][SS_OUTPUT])fc.weight, fc.linear_output); cudaError_t fc_linear_checker = cudaGetLastError(); if (fc_linear_checker!=cudaSuccess){ printf("FC LINEAR PROBLEM:: %s", cudaGetErrorString(fc_linear_checker)); exit(1); } dim3 fc_bias_blocks(1); dim3 fc_bias_thread(NEURONS); fc_biasing<<<fc_bias_blocks, fc_bias_thread>>>(fc.linear_output, fc.bias, fc.bias_output); cudaError_t fc_bias_checker = cudaGetLastError(); if (fc_bias_checker!=cudaSuccess){ printf("FC BIASING PROBLEM:: %s", cudaGetErrorString(fc_bias_checker)); exit(1); } dim3 fc_sigmoid_blocks(1); dim3 fc_sigmoid_thread(NEURONS); fc_sigmoid<<<fc_sigmoid_blocks, fc_sigmoid_thread>>>(fc.bias_output, fc.final_output); cudaError_t fc_sigmoid_checker = cudaGetLastError(); if (fc_sigmoid_checker!=cudaSuccess){ printf("FC SIGMOID PROBLEM:: %s", cudaGetErrorString(fc_sigmoid_checker)); exit(1); } cudaFree(kernel_data); float conv_filter_res[6][24][24],conv_bias_res[6][24][24],conv_final_res[6][24][24], ss_filter_res[6][6][6],ss_bias_res[6][6][6],ss_final_res[6][6][6], fc_linear_res[10],fc_bias_res[10],fc_final_res[10]; cudaMemcpy(conv_filter_res, conv.filter_output, 6*24*24*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(conv_bias_res, conv.bias_output, 6*24*24*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(conv_final_res, conv.final_output, 6*24*24*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(ss_filter_res, ss.filter_output, 6*6*6*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(ss_bias_res, ss.bias_output, 6*6*6*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(ss_final_res, ss.final_output, 6*6*6*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(fc_linear_res, fc.linear_output, 10*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(fc_bias_res, fc.bias_output, 10*sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(fc_final_res, fc.final_output, 10*sizeof(float), cudaMemcpyDeviceToHost); conv_maxError(conv_filter_res,"conv_filtering"); conv_maxError(conv_bias_res,"conv_biasing"); conv_maxError(conv_final_res,"conv_sigmoid"); ss_maxError(ss_filter_res,"ss_filtering"); ss_maxError(ss_bias_res,"ss_biasing"); ss_maxError(ss_final_res,"ss_sigmoid"); fc_maxError(fc_linear_res,"fc_linear"); fc_maxError(fc_bias_res,"fc_biasing"); fc_maxError(fc_final_res,"fc_sigmoid"); } int main(){ const char *image_filename = "data/t10k-images.idx3-ubyte"; const char *label_filename = "data/t10k-labels.idx1-ubyte"; mnist_data *data_set = (mnist_data *)malloc(sizeof(*data_set)*10000); unsigned int count = 0; if(mnist_load(image_filename,label_filename, &data_set,&count)!=0){ printf("Problems with loading data."); exit(1); } printf("test_cnt = %d (should be 10000)\n\n",count); float data[IMAGE_HEIGHT][IMAGE_WIDTH]; for(int i = 0; i< IMAGE_HEIGHT;i++){ for(int j = 0; j< IMAGE_WIDTH;j++){ data_set[0].data[i][j] = -1.0; data[i][j] = data_set[0].data[i][j]; } } forward_pass(data); return 0; }
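// Not part of the original file: a short derivation of the expected checker constants used by
// the *_maxError() functions above, following the values hard-coded in this test
// (image inputs all -1.0, conv/ss weights and biases all -1.0f, fc weights and biases all +1.0f):
//   conv_filtering : 5x5 window, each term (-1 input)*(-1 weight) = +1, 25 terms  ->  25
//   conv_biasing   : 25 + conv bias (-1)                                          ->  24
//   conv_sigmoid   : 1/(1+expf(-24)), which evaluates to 1.0f in single precision
//   ss_filtering   : 4x4 window, each term (~1.0)*(-1 weight) = -1, 16 terms      -> -16
//   ss_biasing     : -16 + ss bias (-1)                                           -> -17
//   fc_linear      : forward_pass feeds a 6x6x6 block of 1.0f with weights 1.0f   -> 216
//   fc_biasing     : 216 + fc bias (+1)                                           -> 217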
0159734cd3056b085e567659167624713752d5d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define BLOCK_SIZE 512 // **===--------------------- Modify this function -----------------------===** //! @param g_data input data in global memory // result is expected in index 0 of g_data //! @param n input number of elements to reduce from input data // **===------------------------------------------------------------------===** __global__ void reduction(unsigned int *g_data, int n) { __shared__ unsigned int partialSum[2*BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int i = 2 * blockIdx.x * BLOCK_SIZE; if (i + t < n) partialSum[t] = g_data[i+t]; else partialSum[t] = 0; if (i + BLOCK_SIZE + t < n) partialSum[BLOCK_SIZE + t] = g_data[i + BLOCK_SIZE + t]; else partialSum[BLOCK_SIZE + t] = 0; for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t+stride]; } if (t == 0){ g_data[blockIdx.x] = partialSum[0]; } } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
0159734cd3056b085e567659167624713752d5d2.cu
#ifndef _SCAN_NAIVE_KERNEL_H_ #define _SCAN_NAIVE_KERNEL_H_ #define BLOCK_SIZE 512 // **===--------------------- Modify this function -----------------------===** //! @param g_data input data in global memory // result is expected in index 0 of g_data //! @param n input number of elements to reduce from input data // **===------------------------------------------------------------------===** __global__ void reduction(unsigned int *g_data, int n) { __shared__ unsigned int partialSum[2*BLOCK_SIZE]; unsigned int t = threadIdx.x; unsigned int i = 2 * blockIdx.x * BLOCK_SIZE; if (i + t < n) partialSum[t] = g_data[i+t]; else partialSum[t] = 0; if (i + BLOCK_SIZE + t < n) partialSum[BLOCK_SIZE + t] = g_data[i + BLOCK_SIZE + t]; else partialSum[BLOCK_SIZE + t] = 0; for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride >>= 1) { __syncthreads(); if (t < stride) partialSum[t] += partialSum[t+stride]; } if (t == 0){ g_data[blockIdx.x] = partialSum[0]; } } #endif // #ifndef _SCAN_NAIVE_KERNEL_H_
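// A hedged host-side sketch (not part of the original header) of one way to finish the
// reduction: each launch leaves one partial sum per block in g_data[blockIdx.x], and the
// remaining partials are summed on the host. The function name and the use of std::vector
// are illustrative; error checking is omitted.
#include <vector>
unsigned int reduceWithHostFinish(unsigned int* d_data, int n) {
    int blocks = (n + 2 * BLOCK_SIZE - 1) / (2 * BLOCK_SIZE);   // 2*BLOCK_SIZE inputs per block
    reduction<<<blocks, BLOCK_SIZE>>>(d_data, n);
    std::vector<unsigned int> partials(blocks);
    cudaMemcpy(partials.data(), d_data, blocks * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    unsigned int total = 0;
    for (unsigned int p : partials) total += p;                 // finish the last step on the CPU
    return total;
}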
matrix_add_gpu.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <algorithm> // Change the code here: // This should be changed to GPU kernel definition void matAdd(int width, int height, const float* A, const float* B, float* C) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index = i*width + j; C[index] = A[index] + B[index]; } } } int main() { int width = 1000; int height = 100; int numElements = width*height; float* A = (float*)calloc(numElements, sizeof(float)); float* B = (float*)calloc(numElements, sizeof(float)); float* C = (float*)calloc(numElements, sizeof(float)); srand(1214134); for (int i = 0; i < numElements; i++) { A[i] = float(rand())/float(RAND_MAX + 1.0); B[i] = float(rand())/float(RAND_MAX + 1.0); } // Insert your code here: // 1. Create GPU device buffers // 2. Copy input data from host to device (matrixes A and B) // 3. Change the CPU function call to the GPU kernel call matAdd(width, height, A, B, C); // 4. Copy the result back (matrix C) for (int i = 0; i < std::min(5, height); i++) { for (int j = 0; j < std::min(5, width); j++) { int index = i*width + j; printf("%3.2f + %3.2f = %3.2f;\t", A[index], B[index], C[index]); } printf("...\n"); } printf("...\n"); free(A); free(B); free(C); // Free GPU memory here return 0; }
matrix_add_gpu.cu
#include <stdio.h> #include <stdlib.h> #include <algorithm> // Change the code here: // This should be changed to GPU kernel definition void matAdd(int width, int height, const float* A, const float* B, float* C) { for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { int index = i*width + j; C[index] = A[index] + B[index]; } } } int main() { int width = 1000; int height = 100; int numElements = width*height; float* A = (float*)calloc(numElements, sizeof(float)); float* B = (float*)calloc(numElements, sizeof(float)); float* C = (float*)calloc(numElements, sizeof(float)); srand(1214134); for (int i = 0; i < numElements; i++) { A[i] = float(rand())/float(RAND_MAX + 1.0); B[i] = float(rand())/float(RAND_MAX + 1.0); } // Insert your code here: // 1. Create GPU device buffers // 2. Copy input data from host to device (matrixes A and B) // 3. Change the CPU function call to the GPU kernel call matAdd(width, height, A, B, C); // 4. Copy the result back (matrix C) for (int i = 0; i < std::min(5, height); i++) { for (int j = 0; j < std::min(5, width); j++) { int index = i*width + j; printf("%3.2f + %3.2f = %3.2f;\t", A[index], B[index], C[index]); } printf("...\n"); } printf("...\n"); free(A); free(B); free(C); // Free GPU memory here return 0; }
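// One possible completion of the numbered TODOs above (an editor's sketch, not the original
// solution): matAdd becomes an element-per-thread kernel, and main() gains the device buffers,
// copies, launch, and cleanup. All names and launch dimensions below are illustrative.
__global__ void matAddKernel(int width, int height, const float* A, const float* B, float* C) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < height && col < width) {
        int index = row * width + col;
        C[index] = A[index] + B[index];
    }
}
// In main(), replacing the matAdd(width, height, A, B, C); call:
//   size_t bytes = numElements * sizeof(float);
//   float *dA, *dB, *dC;
//   cudaMalloc((void**)&dA, bytes); cudaMalloc((void**)&dB, bytes); cudaMalloc((void**)&dC, bytes); // step 1
//   cudaMemcpy(dA, A, bytes, cudaMemcpyHostToDevice);                                               // step 2
//   cudaMemcpy(dB, B, bytes, cudaMemcpyHostToDevice);
//   dim3 block(16, 16);
//   dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
//   matAddKernel<<<grid, block>>>(width, height, dA, dB, dC);                                       // step 3
//   cudaMemcpy(C, dC, bytes, cudaMemcpyDeviceToHost);                                               // step 4
//   cudaFree(dA); cudaFree(dB); cudaFree(dC);                                                       // free GPU memory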
8487a1b0e5c2ce9c5a84dbd026cc29632753a579.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( hipMalloc((void **) &w_d, w_size) ); gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. 
int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( hipMalloc((void **) &G_d, G_size) ); gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. int *newG_d; gpuErrchk( hipMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. hipLaunchKernelGGL(( spin), dim3((n*n+threadsNum-1)/threadsNum),dim3(threadsNum), 0, 0, G_d, w_d, newG_d, n); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); hipLaunchKernelGGL(( check), dim3((n*n+threadsNum-1)/threadsNum),dim3(threadsNum), 0, 0, G_d, newG_d, n, same_d); gpuErrchk( hipPeekAtLastError() ); gpuErrchk( hipDeviceSynchronize() ); gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( hipFree(w_d) ); gpuErrchk( hipFree(G_d) ); gpuErrchk( hipFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
8487a1b0e5c2ce9c5a84dbd026cc29632753a579.cu
/* * ising_cuda_v1.cu * * Created on: Dec 26, 2019 * Author: Charalampos Eleftheriadis */ #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 512 #define threadsNum 64 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel Function. __global__ void spin(int *G, double *w, int *newG, int n) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; double weightSum = 0; // Calculates weight contribution for each neighboring Atomic Spin and sums it. weightSum += w[0] * G[((index/n - 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[1] * G[((index/n - 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[2] * G[((index/n - 2 + n)%n) * n + (index)%n]; weightSum += w[3] * G[((index/n - 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[4] * G[((index/n - 2 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[5] * G[((index/n - 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[6] * G[((index/n - 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[7] * G[((index/n - 1 + n)%n) * n + (index)%n]; weightSum += w[8] * G[((index/n - 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[9] * G[((index/n - 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[10] * G[((index/n + n)%n) * n + (index - 2 + n)%n]; weightSum += w[11] * G[((index/n + n)%n) * n + (index - 1 + n)%n]; // w[12] is not contributing anything. It's the current Atomic Spin. weightSum += w[13] * G[((index/n + n)%n) * n + (index + 1 + n)%n]; weightSum += w[14] * G[((index/n + n)%n) * n + (index + 2 + n)%n]; weightSum += w[15] * G[((index/n + 1 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[16] * G[((index/n + 1 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[17] * G[((index/n + 1 + n)%n) * n + (index)%n]; weightSum += w[18] * G[((index/n + 1 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[19] * G[((index/n + 1 + n)%n) * n + (index + 2 + n)%n]; weightSum += w[20] * G[((index/n + 2 + n)%n) * n + (index - 2 + n)%n]; weightSum += w[21] * G[((index/n + 2 + n)%n) * n + (index - 1 + n)%n]; weightSum += w[22] * G[((index/n + 2 + n)%n) * n + (index)%n]; weightSum += w[23] * G[((index/n + 2 + n)%n) * n + (index + 1 + n)%n]; weightSum += w[24] * G[((index/n + 2 + n)%n) * n + (index + 2 + n)%n]; //! Can it be done more efficiently? if (weightSum > 0.0001) newG[index] = 1; else if (weightSum < -0.0001) newG[index] = -1; else newG[index] = G[index]; } // Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one. __global__ void check(int *G, int *newG, int n, int *same) { // Calculates Atomic Spin index. int index = blockIdx.x*blockDim.x + threadIdx.x; // Checks for out of bounds indexing and if so quits. if (index >= n*n) return; if (G[index] != newG[index]) *same = 0; } void ising(int *G, double *w, int k, int n) { // Creates and transfers the Weight Matrix to GPU memory. double *w_d; int w_size = 25*sizeof(double); gpuErrchk( cudaMalloc((void **) &w_d, w_size) ); gpuErrchk( cudaMemcpy(w_d, w, w_size, cudaMemcpyHostToDevice) ); // Creates and transfers the Atomic Spins Matrix to GPU memory. 
int *G_d; int G_size = n*n*sizeof(int); gpuErrchk( cudaMalloc((void **) &G_d, G_size) ); gpuErrchk( cudaMemcpy(G_d, G, G_size, cudaMemcpyHostToDevice) ); // Creates the new Atomic Spins Matrix to GPU memory. int *newG_d; gpuErrchk( cudaMalloc((void **) &newG_d, G_size) ); // Creates and transfers a flag that states whether the new Atomic Spins Matrix and the old are the same to GPU memory. int same = 1; int *same_d; gpuErrchk( cudaMalloc((void **) &same_d, sizeof(int)) ); gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) ); // Creates a temporary variable for Atomic Spins Matrices' pointers swapping. int *temp_d; // Checks if function has to be iterated. for (int i=0; i<k; i++) { // Calls the kernel function balancing load to (n*n+threadsNum-1)/threadsNum blocks with threadsNum threads each. spin<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); check<<<(n*n+threadsNum-1)/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d); gpuErrchk( cudaPeekAtLastError() ); gpuErrchk( cudaDeviceSynchronize() ); gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) ); if (same) break; // Atomix Spin Matrices' pointers swapping. temp_d = G_d; G_d = newG_d; newG_d = temp_d; } // Copies data from GPU to CPU memory. gpuErrchk( cudaMemcpy(G, G_d, G_size, cudaMemcpyDeviceToHost) ); // Cleanup. gpuErrchk( cudaFree(w_d) ); gpuErrchk( cudaFree(G_d) ); gpuErrchk( cudaFree(newG_d) ); } int main() { // Weight Matrix. double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004, 0.016, 0.071, 0.117, 0.071, 0.016, 0.026, 0.117, 0.000, 0.117, 0.026, 0.016, 0.071, 0.117, 0.071, 0.016, 0.004, 0.016, 0.026, 0.016, 0.004 }; // Number of dimensions for the square Atomic Spins Matrix. int n = N; // Allocates memory for the Atomic Spins Matrix. int *G = (int *)malloc(n*n * sizeof(int)); // Randomizes seed. srand(time(NULL)); // Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution. for (int i=0; i<n*n; i++) G[i] = ((rand() % 2) * 2) - 1; /* // Reads configuration file. size_t readStatus; FILE *conf_init = fopen("conf-init.bin","rb"); int initG[n*n]; readStatus = fread(&initG, sizeof(int), n*n, conf_init); if (readStatus != n*n) printf("Could not read conf-init.bin file.\n"); fclose(conf_init); // Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file. for (int i=0; i<n*n; i++) G[i] = initG[i]; */ ising(G, w, 10, n); /* // Reads configuration file for state after one iteration. size_t readStatus1; FILE *conf_1 = fopen("conf-1.bin","rb"); int G1[n*n]; readStatus1 = fread(&G1, sizeof(int), n*n, conf_1); if (readStatus1 != n*n) printf("Could not read conf-1.bin file.\n"); fclose(conf_1); // Checks for errors. int errorsNum = 0; for (int i=0; i<n; i++) for (int j=0; j<n; j++) if (G[i*n+j] != G1[i*n+j]) errorsNum++; if (errorsNum == 0) printf("Correct Results!\n"); else printf("Wrong Results. Number of errors: %d\n", errorsNum); // Checks the results. for (int i=0; i<n; i++) { for (int j=0; j<n; j++) { if (G[i*n+j] == G1[i*n+j]) printf("="); else printf("!"); } printf("\n"); } printf("\n\n"); */ return 0; }
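// --- Editorial note on the convergence check in the ising() loop above. ---
// same_d is copied to the device once, before the loop, and never reset to 1
// between sweeps, so after the first sweep that changes any spin the early-exit
// test can never fire again. A sketch of one way to restructure the loop body
// (same variable names as in the function above, CUDA spelling; an editorial
// suggestion, not the author's code):
for (int i = 0; i < k; i++) {
    spin<<<(n*n+threadsNum-1)/threadsNum, threadsNum>>>(G_d, w_d, newG_d, n);
    gpuErrchk( cudaPeekAtLastError() );

    // Reset the flag before every comparison so a fixed point reached at any
    // later sweep is also detected.
    same = 1;
    gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) );

    check<<<(n*n+threadsNum-1)/threadsNum, threadsNum>>>(G_d, newG_d, n, same_d);
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) );

    if (same) break;   // the lattice no longer changes

    // Swap old/new lattices for the next sweep.
    temp_d = G_d; G_d = newG_d; newG_d = temp_d;
}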
d147f841478f635070753ce33947c0aa616fd7d8.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "matx.h" #include <cassert> #include <cstdio> #include <memory> using namespace matx; #define FFT_TYPE HIPFFT_C2C int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) { MATX_ENTER_HANDLER(); using complex = cuda::std::complex<float>; index_t num_samp = 100000000; index_t num_samp_resamp = 100000; index_t N = ::min(num_samp, num_samp_resamp); index_t nyq = N / 2 + 1; constexpr uint32_t num_iterations = 100; float time_ms; hipStream_t stream; hipStreamCreate(&stream); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Create data objects and views tensor_t<float, 1> sigView({num_samp}); tensor_t<complex, 1> sigViewComplex({num_samp / 2 + 1}); tensor_t<float, 1> resampView({num_samp_resamp}); (sigView = random<float>({num_samp}, NORMAL)).run(stream); (sigViewComplex = fft(sigView)).run(stream); // Slice auto sliceView = sigViewComplex.Slice({0}, {nyq}); // Inverse Transform - FFT size based on output (resampView = ifft(sliceView)).run(stream); hipEventRecord(start, stream); for (uint32_t i = 0; i < num_iterations; i++) { // Launch 1D FFT (sigViewComplex = fft(sigView)).run(stream); // Slice auto sv = sigViewComplex.Slice({0}, {nyq}); // Inverse Transform - FFT size based on output (resampView = ifft(sv)).run(stream); } hipEventRecord(stop, stream); hipStreamSynchronize(stream); hipEventElapsedTime(&time_ms, start, stop); printf("Resample Kernel Time = %.2fms per iteration\n", time_ms / num_iterations); hipEventDestroy(start); hipEventDestroy(stop); hipStreamDestroy(stream); CUDA_CHECK_LAST_ERROR(); MATX_EXIT_HANDLER(); }
d147f841478f635070753ce33947c0aa616fd7d8.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "matx.h" #include <cassert> #include <cstdio> #include <memory> using namespace matx; #define FFT_TYPE CUFFT_C2C int main([[maybe_unused]] int argc, [[maybe_unused]] char **argv) { MATX_ENTER_HANDLER(); using complex = cuda::std::complex<float>; index_t num_samp = 100000000; index_t num_samp_resamp = 100000; index_t N = std::min(num_samp, num_samp_resamp); index_t nyq = N / 2 + 1; constexpr uint32_t num_iterations = 100; float time_ms; cudaStream_t stream; cudaStreamCreate(&stream); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Create data objects and views tensor_t<float, 1> sigView({num_samp}); tensor_t<complex, 1> sigViewComplex({num_samp / 2 + 1}); tensor_t<float, 1> resampView({num_samp_resamp}); (sigView = random<float>({num_samp}, NORMAL)).run(stream); (sigViewComplex = fft(sigView)).run(stream); // Slice auto sliceView = sigViewComplex.Slice({0}, {nyq}); // Inverse Transform - FFT size based on output (resampView = ifft(sliceView)).run(stream); cudaEventRecord(start, stream); for (uint32_t i = 0; i < num_iterations; i++) { // Launch 1D FFT (sigViewComplex = fft(sigView)).run(stream); // Slice auto sv = sigViewComplex.Slice({0}, {nyq}); // Inverse Transform - FFT size based on output (resampView = ifft(sv)).run(stream); } cudaEventRecord(stop, stream); cudaStreamSynchronize(stream); cudaEventElapsedTime(&time_ms, start, stop); printf("Resample Kernel Time = %.2fms per iteration\n", time_ms / num_iterations); cudaEventDestroy(start); cudaEventDestroy(stop); cudaStreamDestroy(stream); CUDA_CHECK_LAST_ERROR(); MATX_EXIT_HANDLER(); }
662d06ff904b89029f2d73b627afb477003c7281.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "imgGray.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *d_image = NULL; hipMalloc(&d_image, XSIZE*YSIZE); unsigned char *d_imagegray = NULL; hipMalloc(&d_imagegray, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( imgGray), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,d_imagegray,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( imgGray), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,d_imagegray,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( imgGray), dim3(gridBlock),dim3(threadBlock), 0, 0, d_image,d_imagegray,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
662d06ff904b89029f2d73b627afb477003c7281.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "imgGray.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *d_image = NULL; cudaMalloc(&d_image, XSIZE*YSIZE); unsigned char *d_imagegray = NULL; cudaMalloc(&d_imagegray, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); imgGray<<<gridBlock,threadBlock>>>(d_image,d_imagegray,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { imgGray<<<gridBlock,threadBlock>>>(d_image,d_imagegray,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { imgGray<<<gridBlock,threadBlock>>>(d_image,d_imagegray,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
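// --- Editorial note on the timing loop in the benchmark pair above. ---
// Kernel launches are asynchronous and the steady_clock end-stamp is taken
// without a final device synchronization, so the measured interval can reflect
// launch/queueing overhead rather than kernel execution time. A sketch of the
// same 1000-iteration measurement using CUDA events, which are recorded on the
// GPU (same variable names as above, error checking omitted; an editorial
// sketch, not the author's code):
cudaEvent_t ev_start, ev_stop;
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);

cudaEventRecord(ev_start);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    imgGray<<<gridBlock, threadBlock>>>(d_image, d_imagegray, width, height);
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);            // wait for all queued kernels to finish

float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, ev_start, ev_stop);
// The original prints total microseconds for the 1000 launches; convert ms accordingly.
cout << '[' << elapsed_ms * 1000.0f << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
     << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;

cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);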
30717b512dff16d2712cccc95cc5748fc168c8b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMath.h" #include <iostream> using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void priorBoxKernel( PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const float* minSize, const float* maxSize, const float* aspectRatios, float* outputData) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = H * W * numPriors; const bool haveMaxSize = param.numMaxSize > 0; const int dimAR = (haveMaxSize ? 1 : 0) + numAspectRatios; for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < dim; i += gridDim.x * nthdsPerCTA) { const int w = (i / numPriors) % W; const int h = (i / numPriors) / W; // Usually param.offset == 0.5 // Calucate the center of prior box at the input image scale const float centerX = (w + param.offset) * param.stepW; const float centerY = (h + param.offset) * param.stepH; // Minimum size index const int minSizeId = (i / dimAR) % param.numMinSize; // Aspect ratio index const int arId = i % dimAR; // Generate square pior box of aspect ratio of 1.0, edge length of minSize[minSizeId] if (arId == 0) { const float boxW = minSize[minSizeId]; const float boxH = boxW; float x, y, z, w; // Calculate [x_topleft, y_topleft, x_bottomright, y_bottomright] // Coordinates were scaled to [0, 1] against the width or height of the original input image x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; // If we decided to clip the prior box make sure all the bounding box are inside the original input image if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } // Copy the bounding box coordinates to output outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } // If have maxSize // Generate square pior box for aspect ratio of 1.0, edge length of sqrt(minSize[minSizeId] * maxSize[minSizeId]) // Described in SSD paper page 6 else if (haveMaxSize && arId == 1) { const float boxW = sqrt(minSize[minSizeId] * maxSize[minSizeId]); const float boxH = boxW; float x, y, z, w; x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } // Generate other bouding boxes with aspect ratios of not one. else { const int arOffset = haveMaxSize ? 
arId - 1 : arId; // skip aspectRatios[0] which is 1 const float boxW = minSize[minSizeId] * sqrt(aspectRatios[arOffset]); const float boxH = minSize[minSizeId] / sqrt(aspectRatios[arOffset]); float x, y, z, w; x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } } // Simply copy variance to from the parameter to output float* output = outputData + dim * 4; for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < dim; i += gridDim.x * nthdsPerCTA) { float x, y, z, w; x = param.variance[0]; y = param.variance[1]; z = param.variance[2]; w = param.variance[3]; output[i * 4] = x; output[i * 4 + 1] = y; output[i * 4 + 2] = z; output[i * 4 + 3] = w; } } pluginStatus_t priorBoxGpu( hipStream_t stream, const PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const void* minSize, const void* maxSize, const void* aspectRatios, void* outputData) { const int dim = H * W * numPriors; if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( priorBoxKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, H, W, numPriors, numAspectRatios, (const float*) minSize, (const float*) maxSize, (const float*) aspectRatios, (float*) outputData); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; hipLaunchKernelGGL(( priorBoxKernel<BS>), dim3(GS), dim3(BS), 0, stream, param, H, W, numPriors, numAspectRatios, (const float*) minSize, (const float*) maxSize, (const float*) aspectRatios, (float*) outputData); CSC(hipGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } } pluginStatus_t priorBoxInference( hipStream_t stream, const PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const void* minSize, const void* maxSize, const void* aspectRatios, void* outputData) { ASSERT(param.numMaxSize >= 0); if (param.numMaxSize) return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios, minSize, maxSize, aspectRatios, outputData); else return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios, minSize, nullptr, aspectRatios, outputData); }
30717b512dff16d2712cccc95cc5748fc168c8b0.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "kernel.h" #include "reducedMath.h" #include <iostream> using nvinfer1::rt::reduced_divisor; template <unsigned nthdsPerCTA> __launch_bounds__(nthdsPerCTA) __global__ void priorBoxKernel( PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const float* minSize, const float* maxSize, const float* aspectRatios, float* outputData) { // output dims: (H, W, param.numMinSize, (1+haveMaxSize+numAR-1), 4) const int dim = H * W * numPriors; const bool haveMaxSize = param.numMaxSize > 0; const int dimAR = (haveMaxSize ? 1 : 0) + numAspectRatios; for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < dim; i += gridDim.x * nthdsPerCTA) { const int w = (i / numPriors) % W; const int h = (i / numPriors) / W; // Usually param.offset == 0.5 // Calucate the center of prior box at the input image scale const float centerX = (w + param.offset) * param.stepW; const float centerY = (h + param.offset) * param.stepH; // Minimum size index const int minSizeId = (i / dimAR) % param.numMinSize; // Aspect ratio index const int arId = i % dimAR; // Generate square pior box of aspect ratio of 1.0, edge length of minSize[minSizeId] if (arId == 0) { const float boxW = minSize[minSizeId]; const float boxH = boxW; float x, y, z, w; // Calculate [x_topleft, y_topleft, x_bottomright, y_bottomright] // Coordinates were scaled to [0, 1] against the width or height of the original input image x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; // If we decided to clip the prior box make sure all the bounding box are inside the original input image if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } // Copy the bounding box coordinates to output outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } // If have maxSize // Generate square pior box for aspect ratio of 1.0, edge length of sqrt(minSize[minSizeId] * maxSize[minSizeId]) // Described in SSD paper page 6 else if (haveMaxSize && arId == 1) { const float boxW = sqrt(minSize[minSizeId] * maxSize[minSizeId]); const float boxH = boxW; float x, y, z, w; x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } // Generate other bouding boxes with aspect ratios of not one. else { const int arOffset = haveMaxSize ? 
arId - 1 : arId; // skip aspectRatios[0] which is 1 const float boxW = minSize[minSizeId] * sqrt(aspectRatios[arOffset]); const float boxH = minSize[minSizeId] / sqrt(aspectRatios[arOffset]); float x, y, z, w; x = (centerX - boxW / 2.0f) / param.imgW; y = (centerY - boxH / 2.0f) / param.imgH; z = (centerX + boxW / 2.0f) / param.imgW; w = (centerY + boxH / 2.0f) / param.imgH; if (param.clip) { x = min(max(x, 0.0f), 1.0f); y = min(max(y, 0.0f), 1.0f); z = min(max(z, 0.0f), 1.0f); w = min(max(w, 0.0f), 1.0f); } outputData[i * 4] = x; outputData[i * 4 + 1] = y; outputData[i * 4 + 2] = z; outputData[i * 4 + 3] = w; } } // Simply copy variance to from the parameter to output float* output = outputData + dim * 4; for (int i = blockIdx.x * nthdsPerCTA + threadIdx.x; i < dim; i += gridDim.x * nthdsPerCTA) { float x, y, z, w; x = param.variance[0]; y = param.variance[1]; z = param.variance[2]; w = param.variance[3]; output[i * 4] = x; output[i * 4 + 1] = y; output[i * 4 + 2] = z; output[i * 4 + 3] = w; } } pluginStatus_t priorBoxGpu( cudaStream_t stream, const PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const void* minSize, const void* maxSize, const void* aspectRatios, void* outputData) { const int dim = H * W * numPriors; if (dim > 5120) { const int BS = 128; const int GS = (dim + BS - 1) / BS; priorBoxKernel<BS><<<GS, BS, 0, stream>>>(param, H, W, numPriors, numAspectRatios, (const float*) minSize, (const float*) maxSize, (const float*) aspectRatios, (float*) outputData); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } else { const int BS = 32; const int GS = (dim + BS - 1) / BS; priorBoxKernel<BS><<<GS, BS, 0, stream>>>(param, H, W, numPriors, numAspectRatios, (const float*) minSize, (const float*) maxSize, (const float*) aspectRatios, (float*) outputData); CSC(cudaGetLastError(), STATUS_FAILURE); return STATUS_SUCCESS; } } pluginStatus_t priorBoxInference( cudaStream_t stream, const PriorBoxParameters param, const int H, const int W, const int numPriors, const int numAspectRatios, const void* minSize, const void* maxSize, const void* aspectRatios, void* outputData) { ASSERT(param.numMaxSize >= 0); if (param.numMaxSize) return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios, minSize, maxSize, aspectRatios, outputData); else return priorBoxGpu(stream, param, H, W, numPriors, numAspectRatios, minSize, nullptr, aspectRatios, outputData); }
53ee1ce44dd502e36ad89dd3fb0ee30f86548f0e.hip
// !!! This is a file automatically generated by hipify!!! /*! * \file GPUgaussMLEv2.cu * \author Keith Lidke * \date January 10, 2010 * \brief This file contains all of the Cuda kernels. The helper functions * are defined in GPUgaussLib.cuh */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mex.h" #include "hip/hip_runtime.h" #include "definitions.h" #include "MatInvLib.h" #include "GPUgaussLib.cuh" #include "GPUgaussMLEv2.h" //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit_noshared(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; //GRind=(int)s_Coords[0]; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]);//x kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]);//y dudt[2] = PSFx*PSFy;// I d2udt2[2] = 0.0f;// I dudt[3] = 1.0f;// bg d2udt2[3] = 0.0f;// bg cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); // add variance-gain ratio: v/g^2 if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); // add variance-gain 
ratio: v/g^2 cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/(model+gainR);// add gain ratio M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data;// add gain ratio else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ __shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data memcpy(s_data+(sz*sz*tx), d_data+(sz*sz*bx*BlockSize+sz*sz*tx),sizeof(float)*sz*sz); //initial values kernel_CenterofMass2D(sz, &s_data[sz*sz*tx], &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, &s_data[sz*sz*tx], &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) 
d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigma(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS*NV_PS], Diag[NV_PS], Minv[NV_PS*NV_PS]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_PS; float dudt[NV_PS]; float d2udt2[NV_PS]; float NR_Numerator[NV_PS], NR_Denominator[NV_PS]; float theta[NV_PS]; float maxjump[NV_PS]={1e0f, 1e0f, 1e2f, 2e0f, 5e-1f}; float gamma[NV_PS]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_PS*NV_PS*sizeof(float)); memset(Minv,0,NV_PS*NV_PS*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_PS*sizeof(float)); memset(NR_Denominator,0,NV_PS*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[4]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], 
-maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], 0.5f); theta[4]=min(theta[4], sz/2.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/(model+gainR); M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_z(const float *d_data, float *d_Coords, const float *d_GainRatio, float *d_x0, const float PSFSigma_x, const float Ax, const float Ay, const float Bx, const float By, const float gamma, const float d, const float PSFSigma_y, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma_x sigma of the point spread function on the x axis * \param Ax ??? * \param Ay ??? * \param Bx ??? * \param By ??? * \param gamma ??? * \param d ??? * \param PSFSigma_y sigma of the point spread function on the y axis * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. 
* \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[5*5], Diag[5], Minv[5*5]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=5; float dudt[5]; float d2udt2[5]; float NR_Numerator[5], NR_Denominator[5]; float theta[5]; float maxjump[5]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f}; float g[5]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); float *z_initial = d_x0+(bx*BlockSize+tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma_x, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma_x*PSFSigma_y*sqrt(2.0f)); theta[4]=z_initial[0]; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay,Bx,By, gamma, d, &PSFx, &PSFy, dudt, d2udt2); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay, Bx,By, gamma, d, &PSFx, &PSFy, dudt, NULL); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* 
__global__ void kernel_MLEFit_sigmaxy(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[6*6], Diag[6], Minv[6*6]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=6; float dudt[6]; float d2udt2[6]; float NR_Numerator[6], NR_Denominator[6]; float theta[6]; float maxjump[6]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f,1e-1f}; float g[6]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f,1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; theta[5]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], PSFSigma/10.0f); theta[5]=max(theta[5], PSFSigma/10.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; 
for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; }
53ee1ce44dd502e36ad89dd3fb0ee30f86548f0e.cu
/*! * \file GPUgaussMLEv2.cu * \author Keith Lidke * \date January 10, 2010 * \brief This file contains all of the Cuda kernels. The helper functions * are defined in GPUgaussLib.cuh */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mex.h" #include "cuda_runtime.h" #include "definitions.h" #include "MatInvLib.h" #include "GPUgaussLib.cuh" #include "GPUgaussMLEv2.h" //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit_noshared(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; const int tx = threadIdx.x; const int bx = blockIdx.x; const int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; //GRind=(int)s_Coords[0]; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]);//x kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]);//y dudt[2] = PSFx*PSFy;// I d2udt2[2] = 0.0f;// I dudt[3] = 1.0f;// bg d2udt2[3] = 0.0f;// bg cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); // add variance-gain ratio: v/g^2 if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); // add variance-gain ratio: v/g^2 cf=min(cf, 10e4f); df=min(df, 10e4f); for 
(ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/(model+gainR);// add gain ratio M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data;// add gain ratio else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, const float PSFSigma, const int sz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ __shared__ float s_data[MEM]; float M[NV_P*NV_P], Diag[NV_P], Minv[NV_P*NV_P]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_P; float dudt[NV_P]; float d2udt2[NV_P]; float NR_Numerator[NV_P], NR_Denominator[NV_P]; float theta[NV_P]; float maxjump[NV_P]={1e0f, 1e0f, 1e2f, 2e0f}; float gamma[NV_P]={1.0f, 1.0f, 0.5f, 1.0f}; float Nmax; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_P*NV_P*sizeof(float)); memset(Minv,0,NV_P*NV_P*sizeof(float)); //load data memcpy(s_data+(sz*sz*tx), d_data+(sz*sz*bx*BlockSize+sz*sz*tx),sizeof(float)*sz*sz); //initial values kernel_CenterofMass2D(sz, &s_data[sz*sz*tx], &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, &s_data[sz*sz*tx], &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_P*sizeof(float)); memset(NR_Denominator,0,NV_P*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=data/model-1; if (model>10e-3f) df=data/pow(model, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*sz*tx+sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], PSFSigma, theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], PSFSigma, theta[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if (model>0) if (data>0)Div+=data*log(model)-model-data*log(data)+data; else Div+=-model; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) 
d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_sigma(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS*NV_PS], Diag[NV_PS], Minv[NV_PS*NV_PS]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=NV_PS; float dudt[NV_PS]; float d2udt2[NV_PS]; float NR_Numerator[NV_PS], NR_Denominator[NV_PS]; float theta[NV_PS]; float maxjump[NV_PS]={1e0f, 1e0f, 1e2f, 2e0f, 5e-1f}; float gamma[NV_PS]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV_PS*NV_PS*sizeof(float)); memset(Minv,0,NV_PS*NV_PS*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV_PS*sizeof(float)); memset(NR_Denominator,0,NV_PS*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[4]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll=0;ll<NV;ll++) theta[ll]-=gamma[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], 
-maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], 0.5f); theta[4]=min(theta[4], sz/2.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], PSFSigma); PSFy=kernel_IntGauss1D(jj, theta[1], PSFSigma); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[4], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss2DSigma(ii, jj, theta[0], theta[1], theta[4], theta[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/(model+gainR); M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* __global__ void kernel_MLEFit_z(const float *d_data, float *d_Coords, const float *d_GainRatio, float *d_x0, const float PSFSigma_x, const float Ax, const float Ay, const float Bx, const float By, const float gamma, const float d, const float PSFSigma_y, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma_x sigma of the point spread function on the x axis * \param Ax ??? * \param Ay ??? * \param Bx ??? * \param By ??? * \param gamma ??? * \param d ??? * \param PSFSigma_y sigma of the point spread function on the y axis * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. 
* \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[5*5], Diag[5], Minv[5*5]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=5; float dudt[5]; float d2udt2[5]; float NR_Numerator[5], NR_Denominator[5]; float theta[5]; float maxjump[5]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f}; float g[5]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); float *z_initial = d_x0+(bx*BlockSize+tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma_x, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma_x*PSFSigma_y*sqrt(2.0f)); theta[4]=z_initial[0]; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay,Bx,By, gamma, d, &PSFx, &PSFy, dudt, d2udt2); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll=0;ll<NV;ll++) theta[ll]-=min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); } // Calculating the CRLB and LogLikelihood Div=0.0f; for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { kernel_DerivativeIntGauss2Dz(ii, jj, theta, PSFSigma_x,PSFSigma_y, Ax,Ay, Bx,By, gamma, d, &PSFx, &PSFy, dudt, NULL); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating remaining derivatives dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; } //******************************************************************************************* 
__global__ void kernel_MLEFit_sigmaxy(const float *d_data, float *d_Coords, const float *d_GainRatio, const float PSFSigma, const int sz, const int Mapsz, const int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood,const int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param d_Coords array of subregions's pixel coordinates in original field of view. * \param d_GainRatio calibration result of variance/gain^2 in each pixel of original field of view. * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param Mapsz size of original field of view. * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[6*6], Diag[6], Minv[6*6]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; int NV=6; float dudt[6]; float d2udt2[6]; float NR_Numerator[6], NR_Denominator[6]; float theta[6]; float maxjump[6]={1e0f, 1e0f, 1e2f, 2e0f, 1e-1f,1e-1f}; float g[6]={1.0f, 1.0f, 0.5f, 1.0f, 1.0f,1.0f}; float Nmax; float gainR; int GRind; //Prevent read/write past end of array if ((bx*BlockSize+tx)>=Nfits) return; memset(M,0,NV*NV*sizeof(float)); memset(Minv,0,NV*NV*sizeof(float)); //load data const float *s_data = d_data+(sz*sz*bx*BlockSize+sz*sz*tx); float *s_Coords = d_Coords+(2*bx*BlockSize+2*tx); //initial values kernel_CenterofMass2D(sz, s_data, &theta[0], &theta[1]); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &theta[3]); theta[2]=max(0.0f, (Nmax-theta[3])*2*pi*PSFSigma*PSFSigma); theta[4]=PSFSigma; theta[5]=PSFSigma; for (kk=0;kk<iterations;kk++) {//main iterative loop //initialize memset(NR_Numerator,0,NV*sizeof(float)); memset(NR_Denominator,0,NV*sizeof(float)); for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; GRind=(s_Coords[1]+jj)*Mapsz+s_Coords[0]+ii; gainR=d_GainRatio[GRind]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf=0.0f; df=0.0f; if (model>10e-3f) cf=(data-model)/(model+gainR); if (model>10e-3f) df=(data+gainR)/pow(model+gainR, 2); cf=min(cf, 10e4f); df=min(df, 10e4f); for (ll=0;ll<NV;ll++){ NR_Numerator[ll]+=dudt[ll]*cf; NR_Denominator[ll]+=d2udt2[ll]*cf-pow(dudt[ll], 2)*df; } } // The update for (ll=0;ll<NV;ll++) theta[ll]-=g[ll]*min(max(NR_Numerator[ll]/NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints theta[2]=max(theta[2], 1.0f); theta[3]=max(theta[3], 0.01f); theta[4]=max(theta[4], PSFSigma/10.0f); theta[5]=max(theta[5], PSFSigma/10.0f); } // Calculating the CRLB and LogLikelihood Div=0.0f; 
for (ii=0;ii<sz;ii++) for(jj=0;jj<sz;jj++) { PSFx=kernel_IntGauss1D(ii, theta[0], theta[4]); PSFy=kernel_IntGauss1D(jj, theta[1], theta[5]); model=theta[3]+theta[2]*PSFx*PSFy; data=s_data[sz*jj+ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, theta[0], theta[4], theta[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, theta[1], theta[5], theta[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, theta[0], theta[4], theta[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, theta[1], theta[5], theta[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk=0;kk<NV;kk++)for (ll=kk;ll<NV;ll++){ M[kk*NV+ll]+= dudt[ll]*dudt[kk]/model; M[ll*NV+kk]=M[kk*NV+ll]; } //LogLikelyhood if ((model+gainR)>0) if ((data+gainR)>0)Div+=(data+gainR)*log(model+gainR)-model-(data+gainR)*log(data+gainR)+data; else Div+=-model-gainR; } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV); //write to global arrays for (kk=0;kk<NV;kk++) d_Parameters[Nfits*kk+BlockSize*bx+tx]=theta[kk]; for (kk=0;kk<NV;kk++) d_CRLBs[Nfits*kk+BlockSize*bx+tx]=Diag[kk]; d_LogLikelihood[BlockSize*bx+tx] = Div; return; }
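The four fitting kernels above all share the same damped, clamped Newton-Raphson update. Below is a minimal host-side restatement of that step, added here so the update rule can be read (or unit-tested) in isolation; the function names are illustrative and are not part of GPUgaussMLEv2.

#include <algorithm>

// Clamped Newton-Raphson step: limit each parameter move to +/- maxjump so a small or
// ill-conditioned denominator cannot throw the fit out of the subregion.
inline float clamped_step(float numerator, float denominator, float maxjump)
{
    float step = numerator / denominator;
    return std::min(std::max(step, -maxjump), maxjump);
}

// Apply the update to all nv parameters; `damped` corresponds to the first few
// iterations in the kernels, where the step is additionally scaled by gamma[ll].
inline void update_theta(float *theta, const float *num, const float *den,
                         const float *gamma, const float *maxjump, int nv, bool damped)
{
    for (int ll = 0; ll < nv; ll++) {
        float s = clamped_step(num[ll], den[ll], maxjump[ll]);
        theta[ll] -= damped ? gamma[ll] * s : s;
    }
}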
7479d44bad1329ea6f5dfa36895cd96da3012e77.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include <inttypes.h> #include <string.h> #include <hip/hip_runtime.h> #include <omp.h> #define MAXN 1024 #define MaxProblem 1024 #define BLOCK_SIZE 512 #define ThreadNumber 2 #define UINT uint32_t uint32_t hostMtx[ThreadNumber][2][MAXN*MAXN]; uint32_t Ret[ThreadNumber][2][MAXN*MAXN]; uint32_t ANS[MaxProblem][2]; int problemindex=0; //====================================================== __global__ void matrixAdd( int N,UINT* A, UINT* B, UINT* C){ int row = blockIdx.x; int col = threadIdx.x; C[row*N + col] = A[row*N + col] + B[row*N + col]; } __global__ void matrixMul( int N,UINT* A, UINT* B, UINT* C){ int row = blockIdx.x; int col = threadIdx.x; UINT sum = 0; for(int k=0; k<N; k++) sum += A[row*N + k] * B[k*N + col]; C[row*N + col] = sum; } //============================================ void rand_gen(UINT c, int N, UINT A[MAXN*MAXN]) { UINT x = 2, n = N*N; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { x = (x * x + c + i + j)%n; A[i*N+j] = x; } } } UINT signature(int N, UINT A[MAXN*MAXN]) { UINT h = 0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) h = (h + A[i*N+j]) * 2654435761LU; } return h; } //========================================================== int main(int argc, char *argv[]) { omp_set_num_threads(ThreadNumber); uint32_t S[MaxProblem][64],TotalN[MaxProblem]; while(scanf("%d", &TotalN[problemindex]) == 1){ for (int i = 0; i < 2; i++) { scanf("%d", &S[problemindex][i]); } problemindex++; } //readIn(); uint32_t *cuIN[ThreadNumber][2], *cuTmp[ThreadNumber][6]; uint32_t memSz = MAXN*MAXN*sizeof(uint32_t); for(int k=0;k<ThreadNumber;k++){ for (int i = 0; i < 2; i++) { hipSetDevice(k); hipMalloc((void **) &cuIN[k][i], memSz); } for (int i = 0; i < 6; i++){ hipSetDevice(k); hipMalloc((void **) &cuTmp[k][i], memSz); } } #pragma omp parallel for schedule(dynamic , 1) for(int index=0;index<problemindex;index++){ int pid = omp_get_thread_num(); hipSetDevice(pid); int N=TotalN[index]; for (int i = 0; i < 2; i++) { rand_gen(S[index][i], N, hostMtx[pid][i]); hipMemcpy(cuIN[pid][i], hostMtx[pid][i], memSz, hipMemcpyHostToDevice); } // AB //multiply(cuIN[0], cuIN[1], cuTmp[0]); hipLaunchKernelGGL(( matrixMul), dim3(N), dim3(N), 0, 0, N, cuIN[pid][0], cuIN[pid][1], cuTmp[pid][0]); // BA //multiply(cuIN[1], cuIN[0], cuTmp[1]); hipLaunchKernelGGL(( matrixMul), dim3(N), dim3(N), 0, 0, N, cuIN[pid][1], cuIN[pid][0], cuTmp[pid][1]); //AB+BA //add(cuTmp[0], cuTmp[1], cuTmp[2]); hipLaunchKernelGGL(( matrixAdd), dim3(N), dim3(N), 0, 0, N, cuTmp[pid][0], cuTmp[pid][1], cuTmp[pid][2]); // ABA //multiply(cuTmp[0], cuIN[0], cuTmp[3]); hipLaunchKernelGGL(( matrixMul), dim3(N), dim3(N), 0, 0, N, cuTmp[pid][0], cuIN[pid][0], cuTmp[pid][3]); // BAB //multiply(cuTmp[1], cuIN[1], cuTmp[4]); hipLaunchKernelGGL(( matrixMul), dim3(N), dim3(N), 0, 0, N, cuTmp[pid][1], cuIN[pid][1], cuTmp[pid][4]); //ABA+BAB //add(cuTmp[3], cuTmp[4], cuTmp[5]); hipLaunchKernelGGL(( matrixAdd), dim3(N), dim3(N), 0, 0, N, cuTmp[pid][3], cuTmp[pid][4], cuTmp[pid][5]); hipMemcpy(Ret[pid][0], cuTmp[pid][2], memSz, hipMemcpyDeviceToHost); hipMemcpy(Ret[pid][1], cuTmp[pid][5], memSz, hipMemcpyDeviceToHost); for (int i = 0; i < 2; i++) { ANS[index][i] = signature(N, Ret[pid][i]); } } for(int index=0;index<problemindex;index++){ for (int i = 0; i < 2; i++) printf("%u\n", ANS[index][i]); } for(int k =0;k<ThreadNumber;k++){ for (int i = 0; i < 2; i++) hipFree(cuIN[k][i]); for (int i = 0; i < 6; i++) hipFree(cuTmp[k][i]); } 
return 0; }
7479d44bad1329ea6f5dfa36895cd96da3012e77.cu
#include <stdio.h> #include <assert.h> #include <inttypes.h> #include <string.h> #include <cuda.h> #include <omp.h> #define MAXN 1024 #define MaxProblem 1024 #define BLOCK_SIZE 512 #define ThreadNumber 2 #define UINT uint32_t uint32_t hostMtx[ThreadNumber][2][MAXN*MAXN]; uint32_t Ret[ThreadNumber][2][MAXN*MAXN]; uint32_t ANS[MaxProblem][2]; int problemindex=0; //====================================================== __global__ void matrixAdd( int N,UINT* A, UINT* B, UINT* C){ int row = blockIdx.x; int col = threadIdx.x; C[row*N + col] = A[row*N + col] + B[row*N + col]; } __global__ void matrixMul( int N,UINT* A, UINT* B, UINT* C){ int row = blockIdx.x; int col = threadIdx.x; UINT sum = 0; for(int k=0; k<N; k++) sum += A[row*N + k] * B[k*N + col]; C[row*N + col] = sum; } //============================================ void rand_gen(UINT c, int N, UINT A[MAXN*MAXN]) { UINT x = 2, n = N*N; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) { x = (x * x + c + i + j)%n; A[i*N+j] = x; } } } UINT signature(int N, UINT A[MAXN*MAXN]) { UINT h = 0; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) h = (h + A[i*N+j]) * 2654435761LU; } return h; } //========================================================== int main(int argc, char *argv[]) { omp_set_num_threads(ThreadNumber); uint32_t S[MaxProblem][64],TotalN[MaxProblem]; while(scanf("%d", &TotalN[problemindex]) == 1){ for (int i = 0; i < 2; i++) { scanf("%d", &S[problemindex][i]); } problemindex++; } //readIn(); uint32_t *cuIN[ThreadNumber][2], *cuTmp[ThreadNumber][6]; uint32_t memSz = MAXN*MAXN*sizeof(uint32_t); for(int k=0;k<ThreadNumber;k++){ for (int i = 0; i < 2; i++) { cudaSetDevice(k); cudaMalloc((void **) &cuIN[k][i], memSz); } for (int i = 0; i < 6; i++){ cudaSetDevice(k); cudaMalloc((void **) &cuTmp[k][i], memSz); } } #pragma omp parallel for schedule(dynamic , 1) for(int index=0;index<problemindex;index++){ int pid = omp_get_thread_num(); cudaSetDevice(pid); int N=TotalN[index]; for (int i = 0; i < 2; i++) { rand_gen(S[index][i], N, hostMtx[pid][i]); cudaMemcpy(cuIN[pid][i], hostMtx[pid][i], memSz, cudaMemcpyHostToDevice); } // AB //multiply(cuIN[0], cuIN[1], cuTmp[0]); matrixMul<<<N, N>>>(N, cuIN[pid][0], cuIN[pid][1], cuTmp[pid][0]); // BA //multiply(cuIN[1], cuIN[0], cuTmp[1]); matrixMul<<<N, N>>>(N, cuIN[pid][1], cuIN[pid][0], cuTmp[pid][1]); //AB+BA //add(cuTmp[0], cuTmp[1], cuTmp[2]); matrixAdd<<<N, N>>>(N, cuTmp[pid][0], cuTmp[pid][1], cuTmp[pid][2]); // ABA //multiply(cuTmp[0], cuIN[0], cuTmp[3]); matrixMul<<<N, N>>>(N, cuTmp[pid][0], cuIN[pid][0], cuTmp[pid][3]); // BAB //multiply(cuTmp[1], cuIN[1], cuTmp[4]); matrixMul<<<N, N>>>(N, cuTmp[pid][1], cuIN[pid][1], cuTmp[pid][4]); //ABA+BAB //add(cuTmp[3], cuTmp[4], cuTmp[5]); matrixAdd<<<N, N>>>(N, cuTmp[pid][3], cuTmp[pid][4], cuTmp[pid][5]); cudaMemcpy(Ret[pid][0], cuTmp[pid][2], memSz, cudaMemcpyDeviceToHost); cudaMemcpy(Ret[pid][1], cuTmp[pid][5], memSz, cudaMemcpyDeviceToHost); for (int i = 0; i < 2; i++) { ANS[index][i] = signature(N, Ret[pid][i]); } } for(int index=0;index<problemindex;index++){ for (int i = 0; i < 2; i++) printf("%u\n", ANS[index][i]); } for(int k =0;k<ThreadNumber;k++){ for (int i = 0; i < 2; i++) cudaFree(cuIN[k][i]); for (int i = 0; i < 6; i++) cudaFree(cuTmp[k][i]); } return 0; }
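The .hip/.cu pair above differs only in headers, runtime API prefixes, and the kernel launch syntax; the computation is identical. A hedged CUDA-side sketch of the launch-and-check pattern follows, with the hipified equivalent noted in a comment; it assumes the matrixMul definition from the file above is visible in the same translation unit, and the wrapper name is illustrative.

#include <cstdint>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void matrixMul(int N, uint32_t *A, uint32_t *B, uint32_t *C);  // defined in the file above

static void launch_mul(int N, uint32_t *dA, uint32_t *dB, uint32_t *dC)
{
    // CUDA triple-chevron form; hipify rewrites this as
    //   hipLaunchKernelGGL(matrixMul, dim3(N), dim3(N), 0 /*sharedMem*/, 0 /*stream*/, N, dA, dB, dC);
    matrixMul<<<N, N>>>(N, dA, dB, dC);            // N blocks, N threads per block, default stream
    cudaError_t err = cudaGetLastError();          // catches bad launch configurations (e.g. N > 1024 threads per block)
    if (err != cudaSuccess)
        fprintf(stderr, "matrixMul launch failed: %s\n", cudaGetErrorString(err));
}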
a3f7512e0168a1a49890792e289dbe082511a7f5.hip
// !!! This is a file automatically generated by hipify!!! /* 2019.06.22 by Aurora. Contact:[email protected] Compute descriptors and derivatives.(GPU version) */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <torch/extension.h> #include "../../c/struct.h" /*****************MACRO FOR DEBUG*****************/ #define DEBUG_CONV_EXT #ifdef DEBUG_CONV_EXT #define printf_d printf #else #define printf_d // #endif /***************MACRO FOR DEBUG END***************/ #define warpSize 32 #define PI 3.141592653589793238462643383279 __device__ double s_r(double r_ij, double r_c, double r_cs) { double result; double rc = r_c; double rcs = r_cs; result = (r_ij >= rc) ? 0 : ((r_ij >= rcs) ? 1 / r_ij * (0.5 * cos((r_ij - rcs) / (rc - rcs) * PI) + 0.5) : 1 / r_ij); return result; } __device__ double fastpow2(double number, int dummy) { return number * number; } __global__ void calc_descrpt_and_deriv_DPMD_kernel(int Nframes_tot, int N_Atoms_max, int SEL_A_max, double rc, double rcs, torch::PackedTensorAccessor<double, 3> COORD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 4> NEI_COORD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR) { int frame_loop = blockIdx.x; int atom_loop = threadIdx.x; int k; int i = frame_loop, j = atom_loop; int l; double r_c = rc; double r_cs = rcs; if ((frame_loop >= Nframes_tot) || (atom_loop >= N_Atoms_max)) { return; } for (k = 0; k <= SEL_A_max - 1; k++) { double four_coord[4]; double r_ij; double atom_coord[3] = {COORD_RESHAPE_ACCESSOR[i][j][0], COORD_RESHAPE_ACCESSOR[i][j][1], COORD_RESHAPE_ACCESSOR[i][j][2]}; double nei_coord[3] = {NEI_COORD_RESHAPE_ACCESSOR[i][j][k][0], NEI_COORD_RESHAPE_ACCESSOR[i][j][k][1], NEI_COORD_RESHAPE_ACCESSOR[i][j][k][2]}; double r_ji_coord[3] = {nei_coord[0] - atom_coord[0], nei_coord[1] - atom_coord[1], nei_coord[2] - atom_coord[2]}; r_ij = sqrt(fastpow2(atom_coord[0] - nei_coord[0], 2) + fastpow2(atom_coord[1] - nei_coord[1], 2) + fastpow2(atom_coord[2] - nei_coord[2], 2)); four_coord[0] = s_r(r_ij, r_c, r_cs); four_coord[1] = four_coord[0] * r_ji_coord[0] / r_ij; four_coord[2] = four_coord[0] * r_ji_coord[1] / r_ij; four_coord[3] = four_coord[0] * r_ji_coord[2] / r_ij; for (l = 0; l <= 3; l++) { int idx_sym = k * 4 + l; SYM_COORD_DPMD_RESHAPE_ACCESSOR[i][j][idx_sym] = four_coord[l]; } if (r_ij >= rc) { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; } else if (r_ij > rcs) { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[0] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); 
SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[0] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[0] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[0] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[0] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[1] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[2] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[2] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); } else { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[0] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[0] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); 
SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[0] * r_ji_coord[1] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[0] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[1] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[1] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[1] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[1] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[2] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[2] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); } } } std::vector<torch::Tensor> calc_descrpt_and_deriv_DPMD(torch::Tensor COORD, torch::Tensor NEI_COORD, torch::Tensor NEI_DIST, int Nframes_tot, int N_Atoms_max, int SEL_A_max, double rcs, double rc) { torch::Tensor SYM_COORD_DPMD_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DX_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DY_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DZ_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD; torch::Tensor SYM_COORD_DPMD_DX; torch::Tensor SYM_COORD_DPMD_DY; torch::Tensor SYM_COORD_DPMD_DZ; torch::Tensor COORD_RESHAPE = torch::reshape(COORD, {Nframes_tot, N_Atoms_max, 3}); torch::Tensor NEI_COORD_RESHAPE = torch::reshape(NEI_COORD, {Nframes_tot, N_Atoms_max, SEL_A_max, 3}); auto COORD_RESHAPE_ACCESSOR = COORD_RESHAPE.packed_accessor<double, 3>(); auto NEI_COORD_RESHAPE_ACCESSOR = NEI_COORD_RESHAPE.packed_accessor<double, 4>(); auto SYM_COORD_DPMD_RESHAPE_ACCESSOR = SYM_COORD_DPMD_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DX_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DY_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DZ_RESHAPE.packed_accessor<double, 3>(); int i = 0, j = 0, k = 0, l = 0; parameters_info_struct * parameters_info = (parameters_info_struct *)calloc(1, sizeof(parameters_info_struct)); parameters_info->cutoff_1 = rcs; parameters_info->cutoff_2 = rc; hipLaunchKernelGGL(( calc_descrpt_and_deriv_DPMD_kernel), dim3(Nframes_tot), dim3((N_Atoms_max / 32 + 1) * 32), 0, 0, Nframes_tot, N_Atoms_max, SEL_A_max, rc, rcs, COORD_RESHAPE_ACCESSOR, NEI_COORD_RESHAPE_ACCESSOR, SYM_COORD_DPMD_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR); free(parameters_info); SYM_COORD_DPMD = 
torch::reshape(SYM_COORD_DPMD_RESHAPE, {Nframes_tot, -1}); SYM_COORD_DPMD_DX = torch::reshape(SYM_COORD_DPMD_DX_RESHAPE, {Nframes_tot, -1}); SYM_COORD_DPMD_DY = torch::reshape(SYM_COORD_DPMD_DY_RESHAPE, {Nframes_tot, -1}); SYM_COORD_DPMD_DZ = torch::reshape(SYM_COORD_DPMD_DZ_RESHAPE, {Nframes_tot, -1}); return {SYM_COORD_DPMD, SYM_COORD_DPMD_DX, SYM_COORD_DPMD_DY, SYM_COORD_DPMD_DZ}; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("calc_descrpt_and_deriv_DPMD", &calc_descrpt_and_deriv_DPMD, "TEST calc_descrpt_and_deriv_DPMD"); }
a3f7512e0168a1a49890792e289dbe082511a7f5.cu
/* 2019.06.22 by Aurora. Contact:[email protected] Compute descriptors and derivatives.(GPU version) */ #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <torch/extension.h> #include "../../c/struct.h" /*****************MACRO FOR DEBUG*****************/ #define DEBUG_CONV_EXT #ifdef DEBUG_CONV_EXT #define printf_d printf #else #define printf_d // #endif /***************MACRO FOR DEBUG END***************/ #define warpSize 32 #define PI 3.141592653589793238462643383279 __device__ double s_r(double r_ij, double r_c, double r_cs) { double result; double rc = r_c; double rcs = r_cs; result = (r_ij >= rc) ? 0 : ((r_ij >= rcs) ? 1 / r_ij * (0.5 * cos((r_ij - rcs) / (rc - rcs) * PI) + 0.5) : 1 / r_ij); return result; } __device__ double fastpow2(double number, int dummy) { return number * number; } __global__ void calc_descrpt_and_deriv_DPMD_kernel(int Nframes_tot, int N_Atoms_max, int SEL_A_max, double rc, double rcs, torch::PackedTensorAccessor<double, 3> COORD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 4> NEI_COORD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR, torch::PackedTensorAccessor<double, 3> SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR) { int frame_loop = blockIdx.x; int atom_loop = threadIdx.x; int k; int i = frame_loop, j = atom_loop; int l; double r_c = rc; double r_cs = rcs; if ((frame_loop >= Nframes_tot) || (atom_loop >= N_Atoms_max)) { return; } for (k = 0; k <= SEL_A_max - 1; k++) { double four_coord[4]; double r_ij; double atom_coord[3] = {COORD_RESHAPE_ACCESSOR[i][j][0], COORD_RESHAPE_ACCESSOR[i][j][1], COORD_RESHAPE_ACCESSOR[i][j][2]}; double nei_coord[3] = {NEI_COORD_RESHAPE_ACCESSOR[i][j][k][0], NEI_COORD_RESHAPE_ACCESSOR[i][j][k][1], NEI_COORD_RESHAPE_ACCESSOR[i][j][k][2]}; double r_ji_coord[3] = {nei_coord[0] - atom_coord[0], nei_coord[1] - atom_coord[1], nei_coord[2] - atom_coord[2]}; r_ij = sqrt(fastpow2(atom_coord[0] - nei_coord[0], 2) + fastpow2(atom_coord[1] - nei_coord[1], 2) + fastpow2(atom_coord[2] - nei_coord[2], 2)); four_coord[0] = s_r(r_ij, r_c, r_cs); four_coord[1] = four_coord[0] * r_ji_coord[0] / r_ij; four_coord[2] = four_coord[0] * r_ji_coord[1] / r_ij; four_coord[3] = four_coord[0] * r_ji_coord[2] / r_ij; for (l = 0; l <= 3; l++) { int idx_sym = k * 4 + l; SYM_COORD_DPMD_RESHAPE_ACCESSOR[i][j][idx_sym] = four_coord[l]; } if (r_ij >= rc) { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 0; SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 0; } else if (r_ij > rcs) { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[0] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * 
r_ji_coord[0] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[0] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[0] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[0] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[1] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[1] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[1] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij) + PI * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[0] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) + PI * r_ji_coord[1] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[2] * r_ji_coord[2] * (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij * r_ij * r_ij) - (0.5 + 0.5 * cos(PI * (r_ij - rcs) / (rc - rcs))) / (r_ij * r_ij) + PI * r_ji_coord[2] * r_ji_coord[2] * sin(PI * (r_ij - rcs) / (rc - rcs)) / 2.0 / (rc - rcs) / (r_ij * r_ij * r_ij); } else { SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[0] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[0] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[0] * r_ji_coord[1] / 
(r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[0] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[1] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[1] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[1] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[1] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 0] = r_ji_coord[2] / (r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 1] = 2.0 * r_ji_coord[0] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 2] = 2.0 * r_ji_coord[1] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij); SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR[i][j][4 * k + 3] = 2.0 * r_ji_coord[2] * r_ji_coord[2] / (r_ij * r_ij * r_ij * r_ij) - 1.0 / (r_ij * r_ij); } } } std::vector<torch::Tensor> calc_descrpt_and_deriv_DPMD(torch::Tensor COORD, torch::Tensor NEI_COORD, torch::Tensor NEI_DIST, int Nframes_tot, int N_Atoms_max, int SEL_A_max, double rcs, double rc) { torch::Tensor SYM_COORD_DPMD_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DX_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DY_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD_DZ_RESHAPE = torch::zeros({Nframes_tot, N_Atoms_max, SEL_A_max * 4}, torch::TensorOptions().dtype(torch::kFloat64).device(torch::kCUDA)); torch::Tensor SYM_COORD_DPMD; torch::Tensor SYM_COORD_DPMD_DX; torch::Tensor SYM_COORD_DPMD_DY; torch::Tensor SYM_COORD_DPMD_DZ; torch::Tensor COORD_RESHAPE = torch::reshape(COORD, {Nframes_tot, N_Atoms_max, 3}); torch::Tensor NEI_COORD_RESHAPE = torch::reshape(NEI_COORD, {Nframes_tot, N_Atoms_max, SEL_A_max, 3}); auto COORD_RESHAPE_ACCESSOR = COORD_RESHAPE.packed_accessor<double, 3>(); auto NEI_COORD_RESHAPE_ACCESSOR = NEI_COORD_RESHAPE.packed_accessor<double, 4>(); auto SYM_COORD_DPMD_RESHAPE_ACCESSOR = SYM_COORD_DPMD_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DX_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DY_RESHAPE.packed_accessor<double, 3>(); auto SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR = SYM_COORD_DPMD_DZ_RESHAPE.packed_accessor<double, 3>(); int i = 0, j = 0, k = 0, l = 0; parameters_info_struct * parameters_info = (parameters_info_struct *)calloc(1, sizeof(parameters_info_struct)); parameters_info->cutoff_1 = rcs; parameters_info->cutoff_2 = rc; calc_descrpt_and_deriv_DPMD_kernel<<<Nframes_tot, (N_Atoms_max / 32 + 1) * 32>>>(Nframes_tot, N_Atoms_max, SEL_A_max, rc, rcs, COORD_RESHAPE_ACCESSOR, NEI_COORD_RESHAPE_ACCESSOR, SYM_COORD_DPMD_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DX_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DY_RESHAPE_ACCESSOR, SYM_COORD_DPMD_DZ_RESHAPE_ACCESSOR); free(parameters_info); SYM_COORD_DPMD = torch::reshape(SYM_COORD_DPMD_RESHAPE, {Nframes_tot, -1}); SYM_COORD_DPMD_DX = torch::reshape(SYM_COORD_DPMD_DX_RESHAPE, {Nframes_tot, -1}); 
SYM_COORD_DPMD_DY = torch::reshape(SYM_COORD_DPMD_DY_RESHAPE, {Nframes_tot, -1}); SYM_COORD_DPMD_DZ = torch::reshape(SYM_COORD_DPMD_DZ_RESHAPE, {Nframes_tot, -1}); return {SYM_COORD_DPMD, SYM_COORD_DPMD_DX, SYM_COORD_DPMD_DY, SYM_COORD_DPMD_DZ}; } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("calc_descrpt_and_deriv_DPMD", &calc_descrpt_and_deriv_DPMD, "TEST calc_descrpt_and_deriv_DPMD"); }
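Both versions of this descriptor extension evaluate the smooth radial switching function s(r) and its analytic Cartesian derivatives on the GPU. The sketch below is a hedged host-side reference of s(r) plus a central finite difference, which can be used to spot-check the analytic derivative expressions written into the D{X,Y,Z} tensors; the names and example cutoffs are illustrative only.

#include <cmath>
#include <cstdio>

// Reference switching function: 1/r inside rcs, cosine switch between rcs and rc, 0 beyond rc.
static double s_r_ref(double r, double rc, double rcs)
{
    const double PI = 3.141592653589793238462643383279;
    if (r >= rc)  return 0.0;
    if (r >= rcs) return (0.5 * std::cos((r - rcs) / (rc - rcs) * PI) + 0.5) / r;
    return 1.0 / r;
}

// Central finite difference of s(r); adequate for comparing against the analytic derivatives.
static double ds_dr_numeric(double r, double rc, double rcs, double h = 1e-6)
{
    return (s_r_ref(r + h, rc, rcs) - s_r_ref(r - h, rc, rcs)) / (2.0 * h);
}

int main()
{
    double rc = 6.0, rcs = 5.5;  // illustrative cutoffs, not values taken from the module
    for (double r = 1.0; r < 7.0; r += 0.5)
        printf("r=%.2f  s=%.6e  ds/dr=%.6e\n", r, s_r_ref(r, rc, rcs), ds_dr_numeric(r, rc, rcs));
    return 0;
}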
7685e10d1dce5984d03c303b4bb9f307ff24a773.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "nvstrings/NVText.h" #include "../custring_view.cuh" #include "../util.h" struct porter_stemmer_measure_fn { custring_view_array d_strings; custring_view* d_vowels; Char y_char; unsigned int* d_results; __device__ bool is_consonant( custring_view* dstr, int index ) { Char ch = dstr->at(index); if( d_vowels->find(ch) >= 0 ) return false; if( (ch != y_char) || (index==0) ) return true; ch = dstr->at(index-1); // only if previous char return d_vowels->find(ch)>=0; // is not a consonant } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int vcs = 0; bool vowel_run = !is_consonant(dstr,0); for( auto itr=dstr->begin(); itr!=dstr->end(); itr++ ) { if( is_consonant(dstr,itr.position()) ) { if( vowel_run ) vcs++; vowel_run = false; } else vowel_run = true; } d_results[idx] = vcs; } }; unsigned int NVText::porter_stemmer_measure(NVStrings& strs, const char* vowels, const char* y_char, unsigned int* results, bool bdevmem ) { unsigned int count = strs.size(); if( count==0 ) return 0; // nothing to do auto execpol = rmm::exec_policy(0); // setup results vector unsigned int* d_results = results; if( !bdevmem ) d_results = device_alloc<unsigned int>(count,0); if( vowels==nullptr ) vowels = "aeiou"; custring_view* d_vowels = custring_from_host(vowels); if( y_char==nullptr ) y_char = "y"; Char char_y; custring_view::char_to_Char(y_char,char_y); // get the string pointers rmm::device_vector<custring_view*> strings(count,nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // do the measure thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, porter_stemmer_measure_fn{d_strings,d_vowels,char_y,d_results}); // done if( !bdevmem ) { CUDA_TRY( hipMemcpyAsync(results,d_results,count*sizeof(unsigned int),hipMemcpyDeviceToHost)) RMM_FREE(d_results,0); } RMM_FREE(d_vowels,0); return 0; }
7685e10d1dce5984d03c303b4bb9f307ff24a773.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "nvstrings/NVText.h" #include "../custring_view.cuh" #include "../util.h" struct porter_stemmer_measure_fn { custring_view_array d_strings; custring_view* d_vowels; Char y_char; unsigned int* d_results; __device__ bool is_consonant( custring_view* dstr, int index ) { Char ch = dstr->at(index); if( d_vowels->find(ch) >= 0 ) return false; if( (ch != y_char) || (index==0) ) return true; ch = dstr->at(index-1); // only if previous char return d_vowels->find(ch)>=0; // is not a consonant } __device__ void operator()(unsigned int idx) { custring_view* dstr = d_strings[idx]; if( !dstr ) return; unsigned int vcs = 0; bool vowel_run = !is_consonant(dstr,0); for( auto itr=dstr->begin(); itr!=dstr->end(); itr++ ) { if( is_consonant(dstr,itr.position()) ) { if( vowel_run ) vcs++; vowel_run = false; } else vowel_run = true; } d_results[idx] = vcs; } }; unsigned int NVText::porter_stemmer_measure(NVStrings& strs, const char* vowels, const char* y_char, unsigned int* results, bool bdevmem ) { unsigned int count = strs.size(); if( count==0 ) return 0; // nothing to do auto execpol = rmm::exec_policy(0); // setup results vector unsigned int* d_results = results; if( !bdevmem ) d_results = device_alloc<unsigned int>(count,0); if( vowels==nullptr ) vowels = "aeiou"; custring_view* d_vowels = custring_from_host(vowels); if( y_char==nullptr ) y_char = "y"; Char char_y; custring_view::char_to_Char(y_char,char_y); // get the string pointers rmm::device_vector<custring_view*> strings(count,nullptr); custring_view** d_strings = strings.data().get(); strs.create_custring_index(d_strings); // do the measure thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, porter_stemmer_measure_fn{d_strings,d_vowels,char_y,d_results}); // done if( !bdevmem ) { CUDA_TRY( cudaMemcpyAsync(results,d_results,count*sizeof(unsigned int),cudaMemcpyDeviceToHost)) RMM_FREE(d_results,0); } RMM_FREE(d_vowels,0); return 0; }
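The porter_stemmer_measure functor above counts how many vowel runs are terminated by a consonant, i.e. the Porter measure m (the number of VC sequences), with 'y' treated as a consonant only when the preceding character is a vowel. A small CPU sketch of the same logic on a plain ASCII word is given below as a cross-check; it is illustrative and not part of the NVText API.

#include <cstring>
#include <cstdio>

static bool is_consonant(const char *w, int i, const char *vowels = "aeiou")
{
    if (std::strchr(vowels, w[i])) return false;          // vowels are never consonants
    if (w[i] != 'y' || i == 0)     return true;           // any other non-vowel is a consonant
    return std::strchr(vowels, w[i - 1]) != nullptr;      // 'y' counts as a consonant only after a vowel
}

static unsigned porter_measure(const char *w)
{
    unsigned m = 0;
    bool vowel_run = !is_consonant(w, 0);
    for (int i = 0; w[i]; ++i) {
        if (is_consonant(w, i)) { if (vowel_run) ++m; vowel_run = false; }
        else                      vowel_run = true;
    }
    return m;                                             // number of VC sequences
}

int main()
{
    // expected: 0 1 2 (standard Porter examples: "tree"=0, "trouble"=1, "private"=2)
    printf("%u %u %u\n", porter_measure("tree"), porter_measure("trouble"), porter_measure("private"));
    return 0;
}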
ba3174e21c8af112e9abe5979226497040da3793.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void solution(float* img, float* xbar, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; img[i] = xbar[i]; } } }
ba3174e21c8af112e9abe5979226497040da3793.cu
__global__ void solution(float* img, float* xbar, int w, int h, int nc) { int x = threadIdx.x + blockDim.x * blockIdx.x; int y = threadIdx.y + blockDim.y * blockIdx.y; if (x < w && y < h) { int i; for (int z = 0; z < nc; z++) { i = x + w * y + w * h * z; img[i] = xbar[i]; } } }
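The solution kernel above is an element-wise copy over a w x h x nc image stored plane by plane, with one thread per pixel. A hedged host-side launch sketch follows, rounding the grid up so every pixel is covered; the wrapper name and block shape are illustrative.

#include <cuda_runtime.h>

__global__ void solution(float *img, float *xbar, int w, int h, int nc);  // defined in the file above

static void run_solution(float *d_img, float *d_xbar, int w, int h, int nc)
{
    dim3 block(32, 8);                                    // 256 threads per block
    dim3 grid((w + block.x - 1) / block.x,                // round up in x
              (h + block.y - 1) / block.y);               // round up in y
    solution<<<grid, block>>>(d_img, d_xbar, w, h, nc);   // HIP form: hipLaunchKernelGGL(solution, grid, block, 0, 0, ...)
}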
f1749fdce68f98d80fb3c8e39b5501bfec3ae213.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Mask_Invert_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; hipMalloc(&A, XSIZE*YSIZE); int *devOut = NULL; hipMalloc(&devOut, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Mask_Invert_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,devOut); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Mask_Invert_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,devOut); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Mask_Invert_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, A,devOut); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f1749fdce68f98d80fb3c8e39b5501bfec3ae213.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Mask_Invert_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); int *devOut = NULL; cudaMalloc(&devOut, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Mask_Invert_Kernel<<<gridBlock,threadBlock>>>(A,devOut); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Mask_Invert_Kernel<<<gridBlock,threadBlock>>>(A,devOut); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Mask_Invert_Kernel<<<gridBlock,threadBlock>>>(A,devOut); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
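In the benchmark above, the two while loops round iXSIZE and iYSIZE up to multiples of BLOCKX and BLOCKY before dividing, which is a ceiling division for the grid size. The small sketch below restates that computation directly, using the first matrix and block shapes from the tables above; it is only an illustrative restatement, not a change to the benchmark.

#include <cuda_runtime.h>
#include <stdio.h>

// Ceiling division: smallest number of blocks of size d that covers n items.
static inline int ceil_div(int n, int d) { return (n + d - 1) / d; }

int main(void)
{
    int XSIZE = 240, YSIZE = 240;   // first entry of matrices_[][]
    int BLOCKX = 8,  BLOCKY = 8;    // first entry of blocks_[][]

    dim3 gridBlock(ceil_div(XSIZE, BLOCKX), ceil_div(YSIZE, BLOCKY));
    dim3 threadBlock(BLOCKX, BLOCKY);

    printf("grid = (%u,%u), block = (%u,%u)\n",
           gridBlock.x, gridBlock.y, threadBlock.x, threadBlock.y);
    return 0;
}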
ddf8135b05e295b1943eac93eb49e47e6db0a43a.hip
// !!! This is a file automatically generated by hipify!!! #define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) #include <hipfft.h> #include <stdio.h> static const char *_cudaGetErrorEnum (hipfftResult error) { switch (error) { #define cr(x) case CUFFT_##x: return #x cr (SUCCESS); cr (INVALID_PLAN); cr (ALLOC_FAILED); cr (INVALID_TYPE); cr (INVALID_VALUE); cr (INTERNAL_ERROR); cr (EXEC_FAILED); cr (SETUP_FAILED); cr (INVALID_SIZE); cr (UNALIGNED_DATA); #undef cr } return "UNKNOWN"; } static inline void __cufftSafeCall (hipfftResult err, const char * file, const int line) { if( HIPFFT_SUCCESS != err) { fprintf (stderr, "CUFFT error at %s:%d\n", file, line); fprintf (stderr, "CUFFT error %d %s\n", err, _cudaGetErrorEnum (err)); hipDeviceReset (); } } extern "C" void #ifdef TRANS_SINGLE execute_plan_fftc_ (hipfftHandle * PLANp, int * ISIGNp, hipfftComplex * data) #else execute_plan_fftc_ (hipfftHandle * PLANp, int * ISIGNp, hipfftDoubleComplex * data) #endif { hipfftHandle plan = *PLANp; int ISIGN = *ISIGNp; /*if (hipDeviceSynchronize() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); return; }*/ if (ISIGN== -1) { #ifdef TRANS_SINGLE cufftSafeCall(hipfftExecR2C(plan, (hipfftReal*)data, data)); #else cufftSafeCall(hipfftExecD2Z(plan, (hipfftDoubleReal*)data, data)); #endif } else if (ISIGN== 1) { #ifdef TRANS_SINGLE cufftSafeCall(hipfftExecC2R(plan, data, (hipfftReal*)data)); #else cufftSafeCall(hipfftExecZ2D(plan, data, (hipfftDoubleReal*)data)); #endif } else { abort(); } // hipDeviceSynchronize(); //if (hipDeviceSynchronize() != hipSuccess){ // fprintf(stderr, "Cuda error: Failed to synchronize\n"); // return; //} }
ddf8135b05e295b1943eac93eb49e47e6db0a43a.cu
#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__) #include <cufft.h> #include <stdio.h> static const char *_cudaGetErrorEnum (cufftResult error) { switch (error) { #define cr(x) case CUFFT_##x: return #x cr (SUCCESS); cr (INVALID_PLAN); cr (ALLOC_FAILED); cr (INVALID_TYPE); cr (INVALID_VALUE); cr (INTERNAL_ERROR); cr (EXEC_FAILED); cr (SETUP_FAILED); cr (INVALID_SIZE); cr (UNALIGNED_DATA); #undef cr } return "UNKNOWN"; } static inline void __cufftSafeCall (cufftResult err, const char * file, const int line) { if( CUFFT_SUCCESS != err) { fprintf (stderr, "CUFFT error at %s:%d\n", file, line); fprintf (stderr, "CUFFT error %d %s\n", err, _cudaGetErrorEnum (err)); cudaDeviceReset (); } } extern "C" void #ifdef TRANS_SINGLE execute_plan_fftc_ (cufftHandle * PLANp, int * ISIGNp, cufftComplex * data) #else execute_plan_fftc_ (cufftHandle * PLANp, int * ISIGNp, cufftDoubleComplex * data) #endif { cufftHandle plan = *PLANp; int ISIGN = *ISIGNp; /*if (cudaDeviceSynchronize() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); return; }*/ if (ISIGN== -1) { #ifdef TRANS_SINGLE cufftSafeCall(cufftExecR2C(plan, (cufftReal*)data, data)); #else cufftSafeCall(cufftExecD2Z(plan, (cufftDoubleReal*)data, data)); #endif } else if (ISIGN== 1) { #ifdef TRANS_SINGLE cufftSafeCall(cufftExecC2R(plan, data, (cufftReal*)data)); #else cufftSafeCall(cufftExecZ2D(plan, data, (cufftDoubleReal*)data)); #endif } else { abort(); } // cudaDeviceSynchronize(); //if (cudaDeviceSynchronize() != cudaSuccess){ // fprintf(stderr, "Cuda error: Failed to synchronize\n"); // return; //} }
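The file above wraps every cuFFT call in cufftSafeCall so that a failing cufftResult is reported with its file and line. Below is a hedged usage sketch of the same pattern around plan creation and a simple 1-D complex-to-complex transform; the transform size NX, the in-place buffer, and the stripped-down error handler are assumptions for illustration only.

#include <cufft.h>
#include <cuda_runtime.h>
#include <stdio.h>

#define cufftSafeCall(err) __cufftSafeCall(err, __FILE__, __LINE__)
static inline void __cufftSafeCall(cufftResult err, const char *file, const int line)
{
    if (err != CUFFT_SUCCESS)
        fprintf(stderr, "CUFFT error %d at %s:%d\n", (int)err, file, line);
}

int main(void)
{
    const int NX = 256;                                   // assumed transform length
    cufftComplex *data = NULL;
    cudaMalloc((void **)&data, sizeof(cufftComplex) * NX);

    cufftHandle plan;
    cufftSafeCall(cufftPlan1d(&plan, NX, CUFFT_C2C, 1));          // plan a 1-D C2C FFT
    cufftSafeCall(cufftExecC2C(plan, data, data, CUFFT_FORWARD)); // in-place forward transform
    cudaDeviceSynchronize();

    cufftSafeCall(cufftDestroy(plan));
    cudaFree(data);
    return 0;
}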
25f5b1caf5652d9d3a4d398f0b6ec9043a4f0123.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define N 1 << 24 #define threads_per_block 512 struct Lock { int *mutex; Lock(void) { int state = 0; hipMalloc((void **)&mutex, sizeof(int)); hipMemcpy(mutex, &state, sizeof(int), hipMemcpyHostToDevice); } ~Lock(void) { hipFree(mutex); } __device__ void lock(void) { while (atomicCAS(mutex, 0, 1) != 0) ; } __device__ void unlock(void) { atomicExch(mutex, 0); } }; __global__ void GPU_big_dot(float *a, float *b, float *c, int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { c[blockIdx.x] = shared[0]; } } __global__ void atomic_function_GPU_big_dot(float *a, float *b, float *c, int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { atomicAdd(c, shared[0]); } } __global__ void atomic_lock_GPU_big_dot(float *a, float *b, float *c, int n, Lock lock) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { lock.lock(); c[0] += shared[0]; lock.unlock(); } } void random_vecotr_init(float *vector) { for (int i = 0; i < N; i++) { vector[i] = (float)rand() / RAND_MAX; } } int main(int argc, char *argv[]) { float *vector1, *vector2, *d_v1, *d_v2, *d_result, *d_result_atomic_fun, *d_result_atomic_lock, *GPUResult, *GPUResult_atmoic_function, *GPUResult_atmoic_lock, gpuSum = 0.0; size_t vector_size = sizeof(float) * N; // allocate memory space for host memory vector1 = (float *)malloc(vector_size); vector2 = (float 
*)malloc(vector_size); // init two vectors with random float numbers srand(time(NULL)); random_vecotr_init(vector1); random_vecotr_init(vector2); // print_vector(vector1, (char *)"vector1"); // print_vector(vector2, (char *)"vector2"); // define grid and block size dim3 block(threads_per_block, 1); dim3 grid((N + block.x - 1) / block.x, 1); printf("grid= %d block= %d\n", grid.x, block.x); // capture the start time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // allocate memory space for device memory hipMalloc((void **)&d_v1, vector_size); hipMalloc((void **)&d_v2, vector_size); hipMalloc((void **)&d_result, grid.x * sizeof(float)); hipMalloc((void **)&d_result_atomic_fun, sizeof(float)); hipMalloc((void **)&d_result_atomic_lock, sizeof(float)); GPUResult = (float *)malloc(grid.x * sizeof(float)); GPUResult_atmoic_function = (float *)malloc(sizeof(float)); GPUResult_atmoic_lock = (float *)malloc(sizeof(float)); // Kernel1 :shared memory and parallel reduction // copy vectors from host to device hipMemcpy(d_v1, vector1, vector_size, hipMemcpyHostToDevice); hipMemcpy(d_v2, vector2, vector_size, hipMemcpyHostToDevice); // launch kernel hipEventRecord(start, 0); hipLaunchKernelGGL(( GPU_big_dot), dim3(grid), dim3(block), 0, 0, d_v1, d_v2, d_result, N); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime1, elapsedTime2, elapsedTime3; hipEventElapsedTime(&elapsedTime1, start, stop); printf("Kernel no atomic execution time: %3.10f sec\n", elapsedTime1 / 1000); // copy result back to host hipMemcpy(GPUResult, d_result, grid.x * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i < grid.x; i++) { gpuSum += GPUResult[i]; } // kernel2: atomic function and shared memory and parallel reduction // copy vectors from host to device hipMemcpy(d_v1, vector1, vector_size, hipMemcpyHostToDevice); hipMemcpy(d_v2, vector2, vector_size, hipMemcpyHostToDevice); // launch atomic func kernel hipEventRecord(start, 0); hipLaunchKernelGGL(( atomic_function_GPU_big_dot), dim3(grid), dim3(block), 0, 0, d_v1, d_v2, d_result_atomic_fun, N); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime2, start, stop); printf("Kernel atomic_function execution time: %3.10f sec\n", elapsedTime2 / 1000); // copy result back to host hipMemcpy(GPUResult_atmoic_function, d_result_atomic_fun, sizeof(float), hipMemcpyDeviceToHost); // kernel3 : atomic lock and shared memory and parallel reduction // copy vectors from host to device hipMemcpy(d_v1, vector1, vector_size, hipMemcpyHostToDevice); hipMemcpy(d_v2, vector2, vector_size, hipMemcpyHostToDevice); // launch atomic lock kernel Lock lock; hipEventRecord(start, 0); hipLaunchKernelGGL(( atomic_lock_GPU_big_dot), dim3(grid), dim3(block), 0, 0, d_v1, d_v2, d_result_atomic_lock, N, lock); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime3, start, stop); printf("Kernel atomic_lock execution time: %3.10f sec\n", elapsedTime3 / 1000); // copy result back to host hipMemcpy(GPUResult_atmoic_lock, d_result_atomic_lock, sizeof(float), hipMemcpyDeviceToHost); // cleanup hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_v1); hipFree(d_v2); hipFree(d_result); hipFree(d_result_atomic_fun); hipFree(d_result_atomic_lock); float speedup1 = elapsedTime1 / elapsedTime2; printf("speed up for atomic function = %.5f\t\n", speedup1); float speedup2 = elapsedTime1 / elapsedTime3; printf("speed up for atomic lock = %.5f\t\n", speedup2); printf("kernel1 shared memory and parallel reduction 
computation result = " "%f\t\n", gpuSum); printf("kernel2 atomic function ,shared memory and parallel reduction " "computation result = %f\t\n", GPUResult_atmoic_function); printf("kernel3 atomic lock ,shared memory and parallel reduction " "computation result = %f\t\n", GPUResult_atmoic_lock); return 0; }
25f5b1caf5652d9d3a4d398f0b6ec9043a4f0123.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define N 1 << 24 #define threads_per_block 512 struct Lock { int *mutex; Lock(void) { int state = 0; cudaMalloc((void **)&mutex, sizeof(int)); cudaMemcpy(mutex, &state, sizeof(int), cudaMemcpyHostToDevice); } ~Lock(void) { cudaFree(mutex); } __device__ void lock(void) { while (atomicCAS(mutex, 0, 1) != 0) ; } __device__ void unlock(void) { atomicExch(mutex, 0); } }; __global__ void GPU_big_dot(float *a, float *b, float *c, int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { c[blockIdx.x] = shared[0]; } } __global__ void atomic_function_GPU_big_dot(float *a, float *b, float *c, int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { atomicAdd(c, shared[0]); } } __global__ void atomic_lock_GPU_big_dot(float *a, float *b, float *c, int n, Lock lock) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // covert global data pointer to the local a and b array 's pointer of this // block float *ia = a + blockIdx.x * blockDim.x; float *ib = b + blockIdx.x * blockDim.x; // boundary check if (idx >= n) return; // declare shared memory __shared__ float shared[threads_per_block]; // put resultt to the shared memory shared[tid] = ia[tid] * ib[tid]; __syncthreads(); // in-place reduction in shared memory for (int stride = 1; stride < blockDim.x; stride *= 2) { if ((tid % (2 * stride)) == 0) { shared[tid] += shared[tid + stride]; } __syncthreads(); } // write result for this block in shared memory to global mem if (tid == 0) { lock.lock(); c[0] += shared[0]; lock.unlock(); } } void random_vecotr_init(float *vector) { for (int i = 0; i < N; i++) { vector[i] = (float)rand() / RAND_MAX; } } int main(int argc, char *argv[]) { float *vector1, *vector2, *d_v1, *d_v2, *d_result, *d_result_atomic_fun, *d_result_atomic_lock, *GPUResult, *GPUResult_atmoic_function, *GPUResult_atmoic_lock, gpuSum = 0.0; size_t vector_size = sizeof(float) * N; // allocate memory space for host memory vector1 = (float *)malloc(vector_size); vector2 = (float *)malloc(vector_size); // init two vectors with random float numbers srand(time(NULL)); 
random_vecotr_init(vector1); random_vecotr_init(vector2); // print_vector(vector1, (char *)"vector1"); // print_vector(vector2, (char *)"vector2"); // define grid and block size dim3 block(threads_per_block, 1); dim3 grid((N + block.x - 1) / block.x, 1); printf("grid= %d block= %d\n", grid.x, block.x); // capture the start time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // allocate memory space for device memory cudaMalloc((void **)&d_v1, vector_size); cudaMalloc((void **)&d_v2, vector_size); cudaMalloc((void **)&d_result, grid.x * sizeof(float)); cudaMalloc((void **)&d_result_atomic_fun, sizeof(float)); cudaMalloc((void **)&d_result_atomic_lock, sizeof(float)); GPUResult = (float *)malloc(grid.x * sizeof(float)); GPUResult_atmoic_function = (float *)malloc(sizeof(float)); GPUResult_atmoic_lock = (float *)malloc(sizeof(float)); // Kernel1 :shared memory and parallel reduction // copy vectors from host to device cudaMemcpy(d_v1, vector1, vector_size, cudaMemcpyHostToDevice); cudaMemcpy(d_v2, vector2, vector_size, cudaMemcpyHostToDevice); // launch kernel cudaEventRecord(start, 0); GPU_big_dot<<<grid, block>>>(d_v1, d_v2, d_result, N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime1, elapsedTime2, elapsedTime3; cudaEventElapsedTime(&elapsedTime1, start, stop); printf("Kernel no atomic execution time: %3.10f sec\n", elapsedTime1 / 1000); // copy result back to host cudaMemcpy(GPUResult, d_result, grid.x * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i < grid.x; i++) { gpuSum += GPUResult[i]; } // kernel2: atomic function and shared memory and parallel reduction // copy vectors from host to device cudaMemcpy(d_v1, vector1, vector_size, cudaMemcpyHostToDevice); cudaMemcpy(d_v2, vector2, vector_size, cudaMemcpyHostToDevice); // launch atomic func kernel cudaEventRecord(start, 0); atomic_function_GPU_big_dot<<<grid, block>>>(d_v1, d_v2, d_result_atomic_fun, N); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime2, start, stop); printf("Kernel atomic_function execution time: %3.10f sec\n", elapsedTime2 / 1000); // copy result back to host cudaMemcpy(GPUResult_atmoic_function, d_result_atomic_fun, sizeof(float), cudaMemcpyDeviceToHost); // kernel3 : atomic lock and shared memory and parallel reduction // copy vectors from host to device cudaMemcpy(d_v1, vector1, vector_size, cudaMemcpyHostToDevice); cudaMemcpy(d_v2, vector2, vector_size, cudaMemcpyHostToDevice); // launch atomic lock kernel Lock lock; cudaEventRecord(start, 0); atomic_lock_GPU_big_dot<<<grid, block>>>(d_v1, d_v2, d_result_atomic_lock, N, lock); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime3, start, stop); printf("Kernel atomic_lock execution time: %3.10f sec\n", elapsedTime3 / 1000); // copy result back to host cudaMemcpy(GPUResult_atmoic_lock, d_result_atomic_lock, sizeof(float), cudaMemcpyDeviceToHost); // cleanup cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_v1); cudaFree(d_v2); cudaFree(d_result); cudaFree(d_result_atomic_fun); cudaFree(d_result_atomic_lock); float speedup1 = elapsedTime1 / elapsedTime2; printf("speed up for atomic function = %.5f\t\n", speedup1); float speedup2 = elapsedTime1 / elapsedTime3; printf("speed up for atomic lock = %.5f\t\n", speedup2); printf("kernel1 shared memory and parallel reduction computation result = " "%f\t\n", gpuSum); printf("kernel2 atomic function ,shared memory and parallel reduction " "computation result = %f\t\n", 
GPUResult_atmoic_function); printf("kernel3 atomic lock ,shared memory and parallel reduction " "computation result = %f\t\n", GPUResult_atmoic_lock); return 0; }
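The three kernels above produce the same dot product and differ only in how per-block partial sums are combined (host-side loop, atomicAdd, or the spin-lock in Lock). A CPU reference makes the results easy to validate; note also that the last two printf calls in main pass float pointers where %f expects values, so the actual numbers would be printed with a dereference such as *GPUResult_atmoic_function. The helper below is a hypothetical addition, assuming the same vector1/vector2 arrays used in main.

#include <math.h>
#include <stdio.h>

// Hypothetical CPU reference for checking the GPU kernels above.
static float cpu_big_dot(const float *a, const float *b, long long n)
{
    double sum = 0.0;                       // accumulate in double to limit rounding error
    for (long long i = 0; i < n; ++i)
        sum += (double)a[i] * (double)b[i];
    return (float)sum;
}

// Usage sketch, after the GPU results have been copied back:
//   float cpuSum = cpu_big_dot(vector1, vector2, 1LL << 24);
//   printf("kernel1 relative error: %e\n", fabs(gpuSum - cpuSum) / cpuSum);
//   printf("kernel2 result: %f\n", *GPUResult_atmoic_function);   // note the dereference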
c8cbbbd51f2ef3bf2b223759b1faa1577e326da9.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include "string.h" #define DEFAULT_THRESHOLD 4000 #define DEFAULT_FILENAME "BWstop-sign.ppm" unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){ if ( !filename || filename[0] == '\0') { fprintf(stderr, "read_ppm but no file name\n"); return NULL; // fail } FILE *fp; fprintf(stderr, "read_ppm( %s )\n", filename); fp = fopen( filename, "rb"); if (!fp) { fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename); return NULL; // fail } char chars[1024]; //int num = read(fd, chars, 1000); int num = fread(chars, sizeof(char), 1000, fp); if (chars[0] != 'P' || chars[1] != '6') { fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename); return NULL; } unsigned int width, height, maxvalue; char *ptr = chars+3; // P 6 newline if (*ptr == '#') // comment line! { ptr = 1 + strstr(ptr, "\n"); } num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue); fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue); *xsize = width; *ysize = height; *maxval = maxvalue; unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int)); if (!pic) { fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height); return NULL; // fail but return } // allocate buffer to read the rest of the file into int bufsize = 3 * width * height * sizeof(unsigned char); if ((*maxval) > 255) bufsize *= 2; unsigned char *buf = (unsigned char *)malloc( bufsize ); if (!buf) { fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize); return NULL; // fail but return } // TODO really read char duh[80]; char *line = chars; // find the start of the pixel data. 
no doubt stupid sprintf(duh, "%d\0", *xsize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *ysize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *maxval); line = strstr(line, duh); fprintf(stderr, "%s found at offset %d\n", duh, line - chars); line += strlen(duh) + 1; long offset = line - chars; //lseek(fd, offset, SEEK_SET); // move to the correct offset fseek(fp, offset, SEEK_SET); // move to the correct offset //long numread = read(fd, buf, bufsize); long numread = fread(buf, sizeof(char), bufsize, fp); fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize); fclose(fp); int pixels = (*xsize) * (*ysize); for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel return pic; // success } void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) { FILE *fp; //int x,y; fp = fopen(filename, "w"); if (!fp) { fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n"); exit(-1); } fprintf(fp, "P6\n"); fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval); int numpix = xsize * ysize; for (int i=0; i<numpix; i++) { unsigned char uc = (unsigned char) pic[i]; fprintf(fp, "%c%c%c", uc, uc, uc); } fclose(fp); } int main( int argc, char **argv ) { int thresh = DEFAULT_THRESHOLD; int number_of_files = 20000;//21312; //filename = strdup( DEFAULT_FILENAME); hipEvent_t start_event, stop_event; float seq_time_gpu; if(argc > 1) { number_of_files = atoi(argv[1]); } hipEventCreate(&start_event); hipEventCreate(&stop_event); hipEventRecord(start_event, 0); for(int k = 1; k <= number_of_files; k++) { char *in_filename = (char*)malloc(36 * sizeof(char)); char *out_filename = (char*)malloc(36 * sizeof(char)); sprintf(in_filename, "./sintel/sintel%03d.ppm", k); sprintf(out_filename, "./sintel-sobel-seq/sintel-sobel%03d.ppm", k); int xsize, ysize, maxval; unsigned int *pic = read_ppm( in_filename, &xsize, &ysize, &maxval ); int numbytes = xsize * ysize * 3 * sizeof( int ); int *result = (int *) malloc( numbytes ); if (!result) { fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes); exit(-1); // fail } int i, j, magnitude, sum1, sum2; int *out = result; for (int col=0; col<ysize; col++) { for (int row=0; row<xsize; row++) { *out++ = 0; } } for (i = 1; i < ysize - 1; i++) { for (j = 1; j < xsize -1; j++) { int offset = i*xsize + j; sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ] + 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ] + pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ]; sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ] - pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > thresh) result[offset] = 255; else result[offset] = 0; } } write_ppm( out_filename, xsize, ysize, 255, result); } hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&seq_time_gpu,start_event, stop_event); printf("Sequential Time: %.2f msec\n", seq_time_gpu); fprintf(stderr, "sobel done\n"); }
c8cbbbd51f2ef3bf2b223759b1faa1577e326da9.cu
#include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include "string.h" #define DEFAULT_THRESHOLD 4000 #define DEFAULT_FILENAME "BWstop-sign.ppm" unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){ if ( !filename || filename[0] == '\0') { fprintf(stderr, "read_ppm but no file name\n"); return NULL; // fail } FILE *fp; fprintf(stderr, "read_ppm( %s )\n", filename); fp = fopen( filename, "rb"); if (!fp) { fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename); return NULL; // fail } char chars[1024]; //int num = read(fd, chars, 1000); int num = fread(chars, sizeof(char), 1000, fp); if (chars[0] != 'P' || chars[1] != '6') { fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename); return NULL; } unsigned int width, height, maxvalue; char *ptr = chars+3; // P 6 newline if (*ptr == '#') // comment line! { ptr = 1 + strstr(ptr, "\n"); } num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue); fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue); *xsize = width; *ysize = height; *maxval = maxvalue; unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int)); if (!pic) { fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height); return NULL; // fail but return } // allocate buffer to read the rest of the file into int bufsize = 3 * width * height * sizeof(unsigned char); if ((*maxval) > 255) bufsize *= 2; unsigned char *buf = (unsigned char *)malloc( bufsize ); if (!buf) { fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize); return NULL; // fail but return } // TODO really read char duh[80]; char *line = chars; // find the start of the pixel data. 
no doubt stupid sprintf(duh, "%d\0", *xsize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *ysize); line = strstr(line, duh); //fprintf(stderr, "%s found at offset %d\n", duh, line-chars); line += strlen(duh) + 1; sprintf(duh, "%d\0", *maxval); line = strstr(line, duh); fprintf(stderr, "%s found at offset %d\n", duh, line - chars); line += strlen(duh) + 1; long offset = line - chars; //lseek(fd, offset, SEEK_SET); // move to the correct offset fseek(fp, offset, SEEK_SET); // move to the correct offset //long numread = read(fd, buf, bufsize); long numread = fread(buf, sizeof(char), bufsize, fp); fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize); fclose(fp); int pixels = (*xsize) * (*ysize); for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel return pic; // success } void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) { FILE *fp; //int x,y; fp = fopen(filename, "w"); if (!fp) { fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n"); exit(-1); } fprintf(fp, "P6\n"); fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval); int numpix = xsize * ysize; for (int i=0; i<numpix; i++) { unsigned char uc = (unsigned char) pic[i]; fprintf(fp, "%c%c%c", uc, uc, uc); } fclose(fp); } int main( int argc, char **argv ) { int thresh = DEFAULT_THRESHOLD; int number_of_files = 20000;//21312; //filename = strdup( DEFAULT_FILENAME); cudaEvent_t start_event, stop_event; float seq_time_gpu; if(argc > 1) { number_of_files = atoi(argv[1]); } cudaEventCreate(&start_event); cudaEventCreate(&stop_event); cudaEventRecord(start_event, 0); for(int k = 1; k <= number_of_files; k++) { char *in_filename = (char*)malloc(36 * sizeof(char)); char *out_filename = (char*)malloc(36 * sizeof(char)); sprintf(in_filename, "./sintel/sintel%03d.ppm", k); sprintf(out_filename, "./sintel-sobel-seq/sintel-sobel%03d.ppm", k); int xsize, ysize, maxval; unsigned int *pic = read_ppm( in_filename, &xsize, &ysize, &maxval ); int numbytes = xsize * ysize * 3 * sizeof( int ); int *result = (int *) malloc( numbytes ); if (!result) { fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes); exit(-1); // fail } int i, j, magnitude, sum1, sum2; int *out = result; for (int col=0; col<ysize; col++) { for (int row=0; row<xsize; row++) { *out++ = 0; } } for (i = 1; i < ysize - 1; i++) { for (j = 1; j < xsize -1; j++) { int offset = i*xsize + j; sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ] + 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ] + pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ]; sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ] - pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ]; magnitude = sum1*sum1 + sum2*sum2; if (magnitude > thresh) result[offset] = 255; else result[offset] = 0; } } write_ppm( out_filename, xsize, ysize, 255, result); } cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&seq_time_gpu,start_event, stop_event); printf("Sequential Time: %.2f msec\n", seq_time_gpu); fprintf(stderr, "sobel done\n"); }
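The inner loop of the Sobel pass above writes the two gradient sums out by hand; the same arithmetic with the 3x3 masks spelled out as arrays may be easier to follow. This is a restatement for clarity under the same pic/xsize/thresh meanings as in main, not code taken from the file.

// 3x3 Sobel masks; sum1/sum2 in the loop above are these masks applied at (i, j).
static const int GX[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1,  0,  1 } };
static const int GY[3][3] = { {  1, 2, 1 }, {  0, 0, 0 }, { -1, -2, -1 } };

static int sobel_pixel(const unsigned int *pic, int xsize, int i, int j, int thresh)
{
    int sum1 = 0, sum2 = 0;
    for (int di = -1; di <= 1; ++di)
        for (int dj = -1; dj <= 1; ++dj) {
            int p = (int)pic[(i + di) * xsize + (j + dj)];
            sum1 += GX[di + 1][dj + 1] * p;   // horizontal gradient
            sum2 += GY[di + 1][dj + 1] * p;   // vertical gradient
        }
    int magnitude = sum1 * sum1 + sum2 * sum2;
    return (magnitude > thresh) ? 255 : 0;    // same threshold test as the loop above
}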
c618dd726ee6f30bc283c979ceb52df4188a7a71.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"

__global__ void reverse_array(int* arr, int N) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < N / 2) {
        int temp = arr[tid];
        arr[tid] = arr[N - 1 - tid];
        arr[N - 1 - tid] = temp;
    }
}

int main(int argc, char const* argv[]) {
    int N = 10;
    int* arr = (int*) malloc(N * sizeof(int));
    for (int i = 0; i < N; ++i) {
        arr[i] = i;
    }

    int* dev_arr;
    cudaCheck( hipMalloc((void**)&dev_arr, N * sizeof(int)) );
    cudaCheck( hipMemcpy(dev_arr, arr, N * sizeof(int), hipMemcpyHostToDevice) );

    dim3 grid_dim = dim3((N / BLOCK_SIZE + N % BLOCK_SIZE ? 1 : 0), 1, 1);
    dim3 block_dim = dim3(MIN(N / 2, BLOCK_SIZE), 1, 1);

    hipLaunchKernelGGL(( reverse_array), dim3(grid_dim), dim3(block_dim), 0, 0, dev_arr, N);

    cudaCheck( hipMemcpy(arr, dev_arr, N * sizeof(int), hipMemcpyDeviceToHost) );

    printf("Array reverseado\n");
    for (int i = 0; i < N; ++i) {
        printf("%d ", arr[i]);
    }
    printf("\n");

    cudaCheck( hipFree(dev_arr) );
    return 0;
}
c618dd726ee6f30bc283c979ceb52df4188a7a71.cu
#include "utils.h"

__global__ void reverse_array(int* arr, int N) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < N / 2) {
        int temp = arr[tid];
        arr[tid] = arr[N - 1 - tid];
        arr[N - 1 - tid] = temp;
    }
}

int main(int argc, char const* argv[]) {
    int N = 10;
    int* arr = (int*) malloc(N * sizeof(int));
    for (int i = 0; i < N; ++i) {
        arr[i] = i;
    }

    int* dev_arr;
    cudaCheck( cudaMalloc((void**)&dev_arr, N * sizeof(int)) );
    cudaCheck( cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice) );

    dim3 grid_dim = dim3((N / BLOCK_SIZE + N % BLOCK_SIZE ? 1 : 0), 1, 1);
    dim3 block_dim = dim3(MIN(N / 2, BLOCK_SIZE), 1, 1);

    reverse_array<<<grid_dim, block_dim>>>(dev_arr, N);

    cudaCheck( cudaMemcpy(arr, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost) );

    printf("Array reverseado\n");
    for (int i = 0; i < N; ++i) {
        printf("%d ", arr[i]);
    }
    printf("\n");

    cudaCheck( cudaFree(dev_arr) );
    return 0;
}
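One detail worth flagging in this pair: in dim3((N / BLOCK_SIZE + N % BLOCK_SIZE ? 1 : 0), 1, 1) the arithmetic operators bind tighter than ?:, so the expression evaluates to 1 whenever N > 0 and the grid is always a single block. That happens to be enough for N = 10 here, but the usual intent is a ceiling division over the N/2 swap positions. Below is a hedged sketch of that computation; BLOCK_SIZE and cudaCheck come from the utils.h header that is not shown, so the value 1024 is an assumption.

#include <cuda_runtime.h>

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 1024   // assumed; the real value lives in utils.h
#endif

// Grid size the launch above appears to intend: enough BLOCK_SIZE-thread
// blocks to cover the N/2 element swaps performed by reverse_array.
static dim3 reverse_grid_dim(int N)
{
    int work_items = N / 2;                                   // one thread per swapped pair
    int blocks = (work_items + BLOCK_SIZE - 1) / BLOCK_SIZE;  // ceiling division
    if (blocks < 1) blocks = 1;                               // at least one block for tiny N
    return dim3(blocks, 1, 1);
}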
julia_cpu_SU.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "../common/book.h" #include "cpu_bitmaptest.h" //cpu_bitmap #define DIM 1000 struct hipComplex { float r; float i; hipComplex(float a, float b) : r(a), i(b) {} float magnitude2(void) { return r * r + i * i; } hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } hipComplex operator+(const hipComplex& a) { return hipComplex(r + a.r, i + a.i); } }; int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); hipComplex c(-0.8, 0.156); hipComplex a(jx, jy); int i = 0; for (i = 0; i<200; i++) { a = a * a + c; if (a.magnitude2() > 1000) return 0; } return 1; } /// //DataBlock struct DataBlock { unsigned char *dev_bitmap; // CPUBitmap *bitmap; }; void kernel(unsigned char *ptr,int ticks){ printf("kernel:%d\n",ticks); for (int y = 0; y<DIM; y++) { for (int x = 0; x<DIM; x++) { int offset = x + y * DIM; int juliaValue = julia(x, y); ptr[offset * 4 + 0] = (255-ticks*10) * juliaValue; ptr[offset * 4 + 1] = (0 + ticks * 15)* juliaValue; ptr[offset * 4 + 2] = (100 + ticks * 25)* juliaValue; //ptrticks ptr[offset * 4 + 3] = 255; // } } } // void generate_frame(DataBlock *d, int ticks) { unsigned char *ptr = d->dev_bitmap; // kernel(ptr, ticks); // } void cleanup(DataBlock *d) { free(d->dev_bitmap); } int main(void) { DataBlock data; CPUBitmap bitmap(DIM, DIM,&data); data.bitmap = &bitmap; data.dev_bitmap = bitmap.get_ptr(); //unsigned char *ptr =bitmap.get_ptr(); //kernel(ptr,1); //printf("frame:%p\n", generate_frame); bitmap.display_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup); }
julia_cpu_SU.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ #include "../common/book.h" #include "cpu_bitmaptest.h" //引用cpu_bitmap的副本 #define DIM 1000 struct cuComplex { float r; float i; cuComplex(float a, float b) : r(a), i(b) {} float magnitude2(void) { return r * r + i * i; } cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } cuComplex operator+(const cuComplex& a) { return cuComplex(r + a.r, i + a.i); } }; int julia(int x, int y) { const float scale = 1.5; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); cuComplex c(-0.8, 0.156); cuComplex a(jx, jy); int i = 0; for (i = 0; i<200; i++) { a = a * a + c; if (a.magnitude2() > 1000) return 0; } return 1; } ///以上为朱丽叶级数相关,原模原样复制过来,不用改动 //DataBlock的作用是在各个函数之间传递图像的内存 struct DataBlock { unsigned char *dev_bitmap; //存储要涂色的区域 CPUBitmap *bitmap; }; void kernel(unsigned char *ptr,int ticks){ printf("kernel:%d\n",ticks); for (int y = 0; y<DIM; y++) { for (int x = 0; x<DIM; x++) { int offset = x + y * DIM; int juliaValue = julia(x, y); ptr[offset * 4 + 0] = (255-ticks*10) * juliaValue; ptr[offset * 4 + 1] = (0 + ticks * 15)* juliaValue; ptr[offset * 4 + 2] = (100 + ticks * 25)* juliaValue; //前三个ptr用来上色,为了让变色更明显,所以令ticks乘以一个值 ptr[offset * 4 + 3] = 255; //决定透明度 } } } //不断变色 void generate_frame(DataBlock *d, int ticks) { unsigned char *ptr = d->dev_bitmap; //获取图像像素点的内存地址 kernel(ptr, ticks); //这儿是生成图像的核函数 } void cleanup(DataBlock *d) { free(d->dev_bitmap); } int main(void) { DataBlock data; CPUBitmap bitmap(DIM, DIM,&data); data.bitmap = &bitmap; data.dev_bitmap = bitmap.get_ptr(); //unsigned char *ptr =bitmap.get_ptr(); //kernel(ptr,1); //printf("frame:%p\n", generate_frame); bitmap.display_and_exit((void(*)(void*, int))generate_frame, (void(*)(void*))cleanup); }
f29d284eaa98ec211c3adf4f3743bb9097e51fda.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d macro: PAPI Error: function " #papi_routine " failed with ret=%d [%s].\n", \ __FILE__, __LINE__, _papiret, PAPI_strerror(_papiret)); \ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with message '%s'.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ const char *errName=NULL, *errStr=NULL; \ hipError_t _e1 = hipGetErrorName(_status, &errName); \ hipError_t _e2 = hipGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with message'%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n",\ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define NUM_METRIC (18) #define NUM_EVENTS (2) #define MAX_SIZE (64*1024*1024) // 64 MB typedef union { long long ll; unsigned long long ull; double d; void *vp; unsigned char ch[8]; } convert_64_t; typedef struct { char name[128]; long long value; } eventStore_t; int eventsFoundCount = 0; // occupants of the array. int eventsFoundMax; // Size of the array. int eventsFoundAdd = 32; // Blocksize for increasing the array. eventStore_t *eventsFound = NULL; // The array. int Streams; // Gets asyncEngineCount (number of physical copy engines). int cpuToGpu = 0; int gpuToGpu = 0; size_t bufferSize = 0; int *deviceEvents = NULL; hipDeviceptr_t *pDevBuffer0 = NULL; hipDeviceptr_t *pDevBuffer1 = NULL; float **pHostBuffer = NULL; hipStream_t *cudaStreams = NULL; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from 'source' (on one GPU) to 'dest' // on another GPU. 
//----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *source, float *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = source[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) /* compute elements in an array */ //----------------------------------------------------------------------------- // FreeGlobals: Frees globally allocated memories. //----------------------------------------------------------------------------- void FreeGlobals(void) { int i; free(deviceEvents); for(i=0; i<Streams; i++) { RUNTIME_API_CALL(hipSetDevice(0)); // device 0 for pDevBuffer0. RUNTIME_API_CALL(hipFree((void **) &pDevBuffer0[i])); // Free allocated space. free(pHostBuffer[i]); // Just locally allocateed. } free(pDevBuffer0); // all contents freed by above. free(pHostBuffer); // Free the pointers. free(pDevBuffer1); // contents freed by the way the tests work. for (i=0; i<Streams; i++) { // Destroy all streams. if (cudaStreams[i] != NULL) { RUNTIME_API_CALL(hipStreamDestroy(cudaStreams[i])); } } free(cudaStreams); // Free the memory for pointers. } // end routine. //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // Copy buffers from host to device, vice versa, both simultaneously. //----------------------------------------------------------------------------- void testCpuToGpu(hipDeviceptr_t * pDevBuffer, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preperation for a transfer // between devices. 
//----------------------------------------------------------------------------- void testGpuToGpu_part1(hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; // Unidirectional copy H2D for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy from device zero to device 1, then from device 1 to device 0. //----------------------------------------------------------------------------- void testGpuToGpu_part2(hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); //printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); // printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { hipLaunchKernelGGL(( test_nvlink_bandwidth) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, (float *) pDevBuffer1[i], (float *) pDevBuffer0[i]); // printf("test_nvlink_bandwidth stream %d \n", i); } } // end routine. //----------------------------------------------------------------------------- // conducts test CpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductCpuToGpu(int EventSet, int device, long long *values) { int i; if (device == 0) { CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer0, pHostBuffer, bufferSize, cudaStreams); } else { RUNTIME_API_CALL(hipSetDevice(device)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); } CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); for (i=0; i<Streams; i++) { RUNTIME_API_CALL(hipFree((void **) pDevBuffer1[i])); } } // end testing device other than 0. CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read any values. } // end routine. //----------------------------------------------------------------------------- // conducts test GpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductGpuToGpu(int EventSet, int device, long long *values) { int i; // Need to target another GPU. I already have pDevBuffer0 on device 0. int partner=device; // Presume event is not on zero. if (device == 0) partner=1; // If it is on zero, make partner 1. 
RUNTIME_API_CALL(hipSetDevice(0)); // Device 0 must RUNTIME_API_CALL(hipDeviceEnablePeerAccess(partner, 0)); // access partner. RUNTIME_API_CALL(hipSetDevice(partner)); // The partner device must access 0. RUNTIME_API_CALL(hipDeviceEnablePeerAccess(0, 0)); // Let non-zero device access 0. for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); } // Prepare the copy, load up buffers on each device from the host. testGpuToGpu_part1(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); // What we want to time: Copy from device 0->1, then device 1->0. CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testGpuToGpu_part2(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read value. // Disable peer access. RUNTIME_API_CALL(hipSetDevice(0)); RUNTIME_API_CALL(hipDeviceDisablePeerAccess(partner)); // Kill connection to device i. RUNTIME_API_CALL(hipSetDevice(partner)); RUNTIME_API_CALL(hipDeviceDisablePeerAccess(0)); // Kill access to device 0. // Now free the pointers on device 'partner' (never 0). for (i=0; i<Streams; i++) { RUNTIME_API_CALL(hipFree((void **) pDevBuffer1[i])); } RUNTIME_API_CALL(hipSetDevice(0)); // return to default pointer. } // end routine. //----------------------------------------------------------------------------- // Show help. //----------------------------------------------------------------------------- static void printUsage() { printf("usage: Demonstrate use of NVlink CUPTI APIs\n"); printf(" -h, -help, --help: display this help message.\n"); printf(" Otherwise, exactly one of these options:\n"); printf(" --cpu-to-gpu: Show results for data transfer between CPU and GPU.\n"); printf(" --gpu-to-gpu: Show results for data transfer between two GPUs.\n"); } // end routine. //----------------------------------------------------------------------------- // Interpret command line flags. //----------------------------------------------------------------------------- void parseCommandLineArgs(int argc, char *argv[]) { if(argc != 2) { printf("Invalid number of options\n"); printUsage(); exit(0); } if(strcmp(argv[1], "--cpu-to-gpu") == 0) { cpuToGpu = 1; } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) { gpuToGpu = 1; } else if((strcmp(argv[1], "--help") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "-h") == 0)) { printUsage(); exit(0); } else { printf("Failed to understand argument '%s'.\n", argv[1]); printUsage(); exit(-1); } } // end routine. //----------------------------------------------------------------------------- // Add an entry to the eventsFound array. On entry we always have room. //----------------------------------------------------------------------------- void addEventsFound(char *eventName, long long value) { strncpy(eventsFound[eventsFoundCount].name, eventName, 127); // Copy up to 127 chars. eventsFound[eventsFoundCount].value = value; // Copy the value. if (++eventsFoundCount >= eventsFoundMax) { // bump count, if too much, make room. eventsFoundMax += eventsFoundAdd; // Add. eventsFound = (eventStore_t*) realloc(eventsFound, eventsFoundMax*sizeof(eventStore_t)); // Make new room. memset(eventsFound+(eventsFoundMax-eventsFoundAdd), 0, eventsFoundAdd*sizeof(eventStore_t)); // zero it. } } // end routine. //----------------------------------------------------------------------------- // Main program. 
//----------------------------------------------------------------------------- int main(int argc, char *argv[]) { int device, deviceCount = 0, i = 0; size_t freeMemory = 0, totalMemory = 0; char str[64]; eventsFoundMax = eventsFoundAdd; // space allocated. eventsFound = (eventStore_t*) calloc(eventsFoundMax, sizeof(eventStore_t)); // make some space. hipDeviceProp_t prop[MAX_DEVICES]; // Parse command line arguments parseCommandLineArgs(argc, argv); DRIVER_API_CALL(hipInit(0)); RUNTIME_API_CALL(hipGetDeviceCount(&deviceCount)); printf("There are %d devices.\n", deviceCount); if(deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } Streams = 1; // Always use at least ONE stream. for(device = 0; device < deviceCount; device++) { RUNTIME_API_CALL(hipGetDeviceProperties(&prop[device], device)); printf("CUDA Device %d Name: %s", i, prop[i].name); printf(", AsyncEngineCount=%i", prop[i].asyncEngineCount); printf(", MultiProcessors=%i", prop[i].multiProcessorCount); printf(", MaxThreadsPerMP=%i", prop[i].maxThreadsPerMultiProcessor); printf("\n"); if (prop[i].asyncEngineCount > Streams) { // If a new high, Streams = prop[i].asyncEngineCount; // Always use the maximum. } } printf("Streams to use: %i (= max Copy Engines).\n", Streams); // allocate space deviceEvents= (int*) calloc(deviceCount, sizeof(int)); pDevBuffer0 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pDevBuffer1 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pHostBuffer = (float **) calloc(Streams, sizeof(float*)); cudaStreams = (hipStream_t*) calloc(Streams, sizeof(hipStream_t)); // Set memcpy size based on available device memory RUNTIME_API_CALL(hipMemGetInfo(&freeMemory, &totalMemory)); printf("Total Device Memory available : "); calculateSize(str, (uint64_t) totalMemory); printf("%s\n", str); bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4); bufferSize = bufferSize/2; printf("Memcpy size is set to %llu B (%llu MB)\n", (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipStreamCreate(&cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Nvlink-topology Records are generated even before hipMemcpy API is called. CUPTI_CALL(cuptiActivityFlushAll(0)); // fprintf(stderr, "Setup PAPI counters internally (PAPI)\n"); int EventSet = PAPI_NULL; int eventCount; int retval; int k, m, cid=-1; /* PAPI Initialization */ retval = PAPI_library_init(PAPI_VER_CURRENT); if(retval != PAPI_VER_CURRENT) { fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n", retval, PAPI_strerror(retval)); FreeGlobals(); exit(-1); } printf("PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION)); // Find cuda component index. k = PAPI_num_components(); // get number of components. for (i=0; i<k && cid<0; i++) { // while not found, PAPI_component_info_t *aComponent = (PAPI_component_info_t*) PAPI_get_component_info(i); // get the component info. if (aComponent == NULL) { // if we failed, fprintf(stderr, "PAPI_get_component_info(%i) failed, " "returned NULL. %i components reported.\n", i,k); FreeGlobals(); exit(-1); } if (strcmp("cuda", aComponent->name) == 0) cid=i; // If we found our match, record it. } // end search components. 
if (cid < 0) { // if no PCP component found, fprintf(stderr, "Failed to find cuda component among %i " "reported components.\n", k); FreeGlobals(); PAPI_shutdown(); exit(-1); } printf("Found CUDA Component at id %d\n", cid); // Add events at a GPU specific level ... eg cuda:::metric:nvlink_total_data_transmitted:device=0 // Just profile devices to match the CUPTI example eventCount = 0; int eventsRead=0; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Begin enumeration of all events. if (cpuToGpu) printf("Experiment timing memory copy from host to GPU.\n"); if (gpuToGpu) printf("Experiment timing memory copy between GPU 0 and each other GPU.\n"); printf("Events with numeric values were read; if they are zero, they may not \n" "be operational, or the exercises performed by this code do not affect \n" "them. We report all 'nvlink' events presented by the cuda component. \n" "\n" "---------------------------Event Name---------------------------:---Value---\n"); PAPI_event_info_t info; // To get event enumeration info. m=PAPI_NATIVE_MASK; // Get the PAPI NATIVE mask. CALL_PAPI_OK(PAPI_enum_cmp_event(&m,PAPI_ENUM_FIRST,cid)); // Begin enumeration of ALL papi counters. do { // Enumerate all events. memset(&info,0,sizeof(PAPI_event_info_t)); // Clear event info. k=m; // Make a copy of current code. // enumerate sub-events, with masks. For this test, we do not // have any! But we do this to test our enumeration works as // expected. First time through is guaranteed, of course. do { // enumerate masked events. CALL_PAPI_OK(PAPI_get_event_info(k,&info)); // get name of k symbol. if (strstr(info.symbol, "nvlink") == NULL) continue; // skip if not an nvlink event. char *devstr = strstr(info.symbol, "device="); // look for device enumerator. if (devstr == NULL) continue; // Skip if no device present. device=atoi(devstr+7); // Get the device id, for info. // fprintf(stderr, "Found nvlink symbol '%s', device=%i.\n", info.symbol , device); if (device < 0 || device >= deviceCount) continue; // skip any not in range. deviceEvents[device]++; // Add to count of events on this device. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); retval = PAPI_add_named_event(EventSet, info.symbol); // Don't want to fail program if name not found... if(retval == PAPI_OK) { eventCount++; // Bump number of events we could test. } else { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. continue; } long long value=-1; // The only value we read. // ===== Allocate Memory ===================================== if(cpuToGpu) { conductCpuToGpu(EventSet, device, &value); // Just one value for now. } else if(gpuToGpu) { conductGpuToGpu(EventSet, device, &value); // Just one value for now. } addEventsFound(info.symbol, value); // Add to events we were able to read. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. // report each event counted. if (value >= 0) { // If not still -1, eventsRead++; // .. count and report. 
calculateSize(str, value); if (value == 0) { printf("%-64s: %9s (not exercised by current test code.)\n", info.symbol, str); } else { printf("%-64s: %9s\n", info.symbol, str); } } else { printf("%-64s: Failed to read.\n", info.symbol); } } while(PAPI_enum_cmp_event(&k,PAPI_NTV_ENUM_UMASKS,cid)==PAPI_OK); // Get next umask entry (bits different) (should return PAPI_NOEVNT). } while(PAPI_enum_cmp_event(&m,PAPI_ENUM_EVENTS,cid)==PAPI_OK); // Get next event code. if (eventCount < 1) { // If we failed on all of them, fprintf(stderr, "Unable to add any NVLINK events; they are not present in the component.\n"); fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } if (eventsRead < 1) { // If failed to read any, printf("\nFailed to read any nvlink events.\n"); // report a failure. fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } printf("\nTotal nvlink events identified: %i.\n\n", eventsFoundCount); if (eventsFoundCount < 2) { // If failed to get counts on any, printf("Insufficient events are exercised by the current test code to perform pair testing.\n"); // report a failure. FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(0); // exit no matter what. } for (i=0; i<deviceCount; i++) { printf("Device %i has %i events. %i potential pairings per device.\n", i, deviceEvents[i], deviceEvents[i]*(deviceEvents[i]-1)/2); } // Begin pair testing. We consider every possible pairing of events // that, tested alone, returned a value greater than zero. int mainEvent, pairEvent, mainDevice, pairDevice; long long saveValues[2]; long long readValues[2]; int goodOnSame=0, failOnDiff=0, badSameCombo=0, pairProblems=0; // Some counters. int type; // 0 succeed on same device, 1 = fail across devices. for (type=0; type<2; type++) { if (type == 0) { printf("List of Pairings on SAME device:\n"); printf("* means value changed by more than 10%% when paired (vs measured singly, above).\n"); printf("^ means a pair was rejected as an invalid combo.\n"); } else { printf("List of Failed Pairings on DIFFERENT devices:\n"); } for (mainEvent = 0; mainEvent<eventsFoundCount-1; mainEvent++) { // Through all but one events. char *devstr = strstr(eventsFound[mainEvent].name, "device="); // look for device enumerator. mainDevice=atoi(devstr+7); // Get the device id. for (pairEvent = mainEvent+1; pairEvent<eventsFoundCount; pairEvent++) { // Through all possible pairs, devstr = strstr(eventsFound[pairEvent].name, "device="); // look for device enumerator. pairDevice=atoi(devstr+7); // Get the device id. if (type == 0 && mainDevice != pairDevice) continue; // Skip if we need same device. if (type == 1 && mainDevice == pairDevice) continue; // Skip if we need different devices. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); CALL_PAPI_OK(PAPI_add_named_event(EventSet, eventsFound[mainEvent].name)); // Here we must examine the return code. int ret = PAPI_add_named_event(EventSet, eventsFound[pairEvent].name); if (type == 0 && ret == PAPI_ECOMBO) { // A bad combination when looking for valid combos. printf("%c %64s + %-64s [Invalid Combo]\n", '^', // report it. eventsFound[mainEvent].name, eventsFound[pairEvent].name); badSameCombo++; // .. count an explicit rejection. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. done with event set. 
CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (type == 1 && ret == PAPI_ECOMBO) { // A bad combination when we are looking for that. printf("%64s + %-64s BAD COMBINATION ACROSS DEVICES.\n", eventsFound[mainEvent].name, eventsFound[pairEvent].name); // report it. failOnDiff++; // count the bad combos. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. don't need to go further. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (ret != PAPI_OK) { // If it failed for some other reason, fprintf(stderr, "%s:%d Attempt to add event '%s' to set " "with event '%s' produced an unexpected error: " "[%s]. Ignoring this pair.\n", __FILE__, __LINE__, eventsFound[pairEvent], eventsFound[mainEvent], PAPI_strerror(ret)); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. didn't work. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair. In type 1, we just skip it, // because we presume a single event on a device isn't changed // by any event on another device. if (type == 1) { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. worked fine; don't measure it. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair, in type 0, get a measurement. readValues[0]= -1; readValues[1] = -1; if(cpuToGpu) { conductCpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } else if(gpuToGpu) { conductGpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } goodOnSame++; // Was accepted by cuda as a valid pairing. // For the checks, we add 2 (so -1 becomes +1) to avoid any // divide by zeros. It won't make a significant difference // in the ratios. (none if readings are the same). double mainSingle = (2.0 + eventsFound[mainEvent].value); // Get value when read alone. double pairSingle = (2.0 + eventsFound[pairEvent].value); // .. double mainCheck = mainSingle/(2.0 + saveValues[0]); // Get ratio when paired. double pairCheck = pairSingle/(2.0 + saveValues[1]); // .. char flag=' ', flag1=' ', flag2=' '; // Presume all okay. if (mainCheck < 0.90 || mainCheck > 1.10) flag1='*'; // Flag as significantly different for main. if (pairCheck < 0.90 || pairCheck > 1.10) flag2='*'; // Flag as significantly different for pair. if (flag1 == '*' || flag2 == '*') { pairProblems++; // Remember number of problems. flag = '*'; // set global flag. } printf("%c %64s + %-64s [", flag, eventsFound[mainEvent].name, eventsFound[pairEvent].name); calculateSize(str, saveValues[0]); // Do some pretty formatting, printf("%c%9s,", flag1, str); calculateSize(str, saveValues[1]); printf("%c%9s]\n", flag2, str); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. } } // end loop on all events. 
if (type == 0) { // For good pairings on same devices, if (goodOnSame == 0) { printf("NO valid pairings of above events if both on the SAME device.\n"); } else { printf("%i valid pairings of above events if both on the SAME device.\n", goodOnSame); } printf("%i unique pairings on SAME device were rejected as bad combinations.\n", badSameCombo); if (pairProblems > 0) { printf("%i pairings resulted in a change of one or both event values > 10%%.\n", pairProblems); } else { printf("No significant change in event values read for any pairings.\n"); } } else { // Must be reporting bad pairings across devices. if (failOnDiff == 0) printf("NO failed pairings of above events if each on a DIFFERENT device.\n"); else printf("%i failed pairings of above events with each on a DIFFERENT device.\n", failOnDiff); } } // end loop on type. PAPI_shutdown(); // Returns no value. return(0); // exit OK. } // end MAIN.
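Each event, and later each event pair, in the loops above is measured with the same PAPI event-set lifecycle: create a set, bind it to the cuda component, add the named event(s), start, run the copy workload, then stop and read. A minimal sketch of that lifecycle, assuming PAPI_library_init has already succeeded and that workload() stands in for the conductCpuToGpu/conductGpuToGpu calls:

#include "papi.h"

/* Sketch only: count one named event around a workload; returns the counter
 * value, or -1 if the event could not be added or started. */
static long long count_one_event(int cid, const char *eventName, void (*workload)(void))
{
    int EventSet = PAPI_NULL;
    long long value = -1;

    if (PAPI_create_eventset(&EventSet) != PAPI_OK) return -1;

    if (PAPI_assign_eventset_component(EventSet, cid) == PAPI_OK &&
        PAPI_add_named_event(EventSet, eventName) == PAPI_OK &&      /* may fail, e.g. PAPI_ECOMBO */
        PAPI_start(EventSet) == PAPI_OK) {
        workload();                                /* the memcpy experiment being timed */
        PAPI_stop(EventSet, &value);               /* stop and read the single counter */
    }

    PAPI_cleanup_eventset(EventSet);               /* empty the set, then free it */
    PAPI_destroy_eventset(&EventSet);
    return value;
}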
f29d284eaa98ec211c3adf4f3743bb9097e51fda.cu
/* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d macro: PAPI Error: function " #papi_routine " failed with ret=%d [%s].\n", \ __FILE__, __LINE__, _papiret, PAPI_strerror(_papiret)); \ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with message '%s'.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ const char *errName=NULL, *errStr=NULL; \ CUresult _e1 = cuGetErrorName(_status, &errName); \ CUresult _e2 = cuGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with message'%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n",\ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define NUM_METRIC (18) #define NUM_EVENTS (2) #define MAX_SIZE (64*1024*1024) // 64 MB typedef union { long long ll; unsigned long long ull; double d; void *vp; unsigned char ch[8]; } convert_64_t; typedef struct { char name[128]; long long value; } eventStore_t; int eventsFoundCount = 0; // occupants of the array. int eventsFoundMax; // Size of the array. int eventsFoundAdd = 32; // Blocksize for increasing the array. eventStore_t *eventsFound = NULL; // The array. int Streams; // Gets asyncEngineCount (number of physical copy engines). int cpuToGpu = 0; int gpuToGpu = 0; size_t bufferSize = 0; int *deviceEvents = NULL; CUdeviceptr *pDevBuffer0 = NULL; CUdeviceptr *pDevBuffer1 = NULL; float **pHostBuffer = NULL; cudaStream_t *cudaStreams = NULL; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from 'source' (on one GPU) to 'dest' // on another GPU. 
//----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *source, float *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = source[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) /* compute elements in an array */ //----------------------------------------------------------------------------- // FreeGlobals: Frees globally allocated memories. //----------------------------------------------------------------------------- void FreeGlobals(void) { int i; free(deviceEvents); for(i=0; i<Streams; i++) { RUNTIME_API_CALL(cudaSetDevice(0)); // device 0 for pDevBuffer0. RUNTIME_API_CALL(cudaFree((void **) &pDevBuffer0[i])); // Free allocated space. free(pHostBuffer[i]); // Just locally allocateed. } free(pDevBuffer0); // all contents freed by above. free(pHostBuffer); // Free the pointers. free(pDevBuffer1); // contents freed by the way the tests work. for (i=0; i<Streams; i++) { // Destroy all streams. if (cudaStreams[i] != NULL) { RUNTIME_API_CALL(cudaStreamDestroy(cudaStreams[i])); } } free(cudaStreams); // Free the memory for pointers. } // end routine. //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // Copy buffers from host to device, vice versa, both simultaneously. //----------------------------------------------------------------------------- void testCpuToGpu(CUdeviceptr * pDevBuffer, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams) { int i; // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preperation for a transfer // between devices. 
//----------------------------------------------------------------------------- void testGpuToGpu_part1(CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams) { int i; // Unidirectional copy H2D for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy from device zero to device 1, then from device 1 to device 0. //----------------------------------------------------------------------------- void testGpuToGpu_part2(CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams) { int i; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i])); //printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(cudaDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i])); // printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(cudaDeviceSynchronize()); for(i = 0; i < Streams; i++) { test_nvlink_bandwidth <<< GRID_SIZE, BLOCK_SIZE >>> ((float *) pDevBuffer1[i], (float *) pDevBuffer0[i]); // printf("test_nvlink_bandwidth stream %d \n", i); } } // end routine. //----------------------------------------------------------------------------- // conducts test CpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductCpuToGpu(int EventSet, int device, long long *values) { int i; if (device == 0) { CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer0, pHostBuffer, bufferSize, cudaStreams); } else { RUNTIME_API_CALL(cudaSetDevice(device)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize)); } CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); for (i=0; i<Streams; i++) { RUNTIME_API_CALL(cudaFree((void **) pDevBuffer1[i])); } } // end testing device other than 0. CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read any values. } // end routine. //----------------------------------------------------------------------------- // conducts test GpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductGpuToGpu(int EventSet, int device, long long *values) { int i; // Need to target another GPU. I already have pDevBuffer0 on device 0. int partner=device; // Presume event is not on zero. if (device == 0) partner=1; // If it is on zero, make partner 1. RUNTIME_API_CALL(cudaSetDevice(0)); // Device 0 must RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(partner, 0)); // access partner. 
RUNTIME_API_CALL(cudaSetDevice(partner)); // The partner device must access 0. RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(0, 0)); // Let non-zero device access 0. for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize)); } // Prepare the copy, load up buffers on each device from the host. testGpuToGpu_part1(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); // What we want to time: Copy from device 0->1, then device 1->0. CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testGpuToGpu_part2(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read value. // Disable peer access. RUNTIME_API_CALL(cudaSetDevice(0)); RUNTIME_API_CALL(cudaDeviceDisablePeerAccess(partner)); // Kill connection to device i. RUNTIME_API_CALL(cudaSetDevice(partner)); RUNTIME_API_CALL(cudaDeviceDisablePeerAccess(0)); // Kill access to device 0. // Now free the pointers on device 'partner' (never 0). for (i=0; i<Streams; i++) { RUNTIME_API_CALL(cudaFree((void **) pDevBuffer1[i])); } RUNTIME_API_CALL(cudaSetDevice(0)); // return to default pointer. } // end routine. //----------------------------------------------------------------------------- // Show help. //----------------------------------------------------------------------------- static void printUsage() { printf("usage: Demonstrate use of NVlink CUPTI APIs\n"); printf(" -h, -help, --help: display this help message.\n"); printf(" Otherwise, exactly one of these options:\n"); printf(" --cpu-to-gpu: Show results for data transfer between CPU and GPU.\n"); printf(" --gpu-to-gpu: Show results for data transfer between two GPUs.\n"); } // end routine. //----------------------------------------------------------------------------- // Interpret command line flags. //----------------------------------------------------------------------------- void parseCommandLineArgs(int argc, char *argv[]) { if(argc != 2) { printf("Invalid number of options\n"); printUsage(); exit(0); } if(strcmp(argv[1], "--cpu-to-gpu") == 0) { cpuToGpu = 1; } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) { gpuToGpu = 1; } else if((strcmp(argv[1], "--help") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "-h") == 0)) { printUsage(); exit(0); } else { printf("Failed to understand argument '%s'.\n", argv[1]); printUsage(); exit(-1); } } // end routine. //----------------------------------------------------------------------------- // Add an entry to the eventsFound array. On entry we always have room. //----------------------------------------------------------------------------- void addEventsFound(char *eventName, long long value) { strncpy(eventsFound[eventsFoundCount].name, eventName, 127); // Copy up to 127 chars. eventsFound[eventsFoundCount].value = value; // Copy the value. if (++eventsFoundCount >= eventsFoundMax) { // bump count, if too much, make room. eventsFoundMax += eventsFoundAdd; // Add. eventsFound = (eventStore_t*) realloc(eventsFound, eventsFoundMax*sizeof(eventStore_t)); // Make new room. memset(eventsFound+(eventsFoundMax-eventsFoundAdd), 0, eventsFoundAdd*sizeof(eventStore_t)); // zero it. } } // end routine. //----------------------------------------------------------------------------- // Main program. 
//----------------------------------------------------------------------------- int main(int argc, char *argv[]) { int device, deviceCount = 0, i = 0; size_t freeMemory = 0, totalMemory = 0; char str[64]; eventsFoundMax = eventsFoundAdd; // space allocated. eventsFound = (eventStore_t*) calloc(eventsFoundMax, sizeof(eventStore_t)); // make some space. cudaDeviceProp prop[MAX_DEVICES]; // Parse command line arguments parseCommandLineArgs(argc, argv); DRIVER_API_CALL(cuInit(0)); RUNTIME_API_CALL(cudaGetDeviceCount(&deviceCount)); printf("There are %d devices.\n", deviceCount); if(deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } Streams = 1; // Always use at least ONE stream. for(device = 0; device < deviceCount; device++) { RUNTIME_API_CALL(cudaGetDeviceProperties(&prop[device], device)); printf("CUDA Device %d Name: %s", i, prop[i].name); printf(", AsyncEngineCount=%i", prop[i].asyncEngineCount); printf(", MultiProcessors=%i", prop[i].multiProcessorCount); printf(", MaxThreadsPerMP=%i", prop[i].maxThreadsPerMultiProcessor); printf("\n"); if (prop[i].asyncEngineCount > Streams) { // If a new high, Streams = prop[i].asyncEngineCount; // Always use the maximum. } } printf("Streams to use: %i (= max Copy Engines).\n", Streams); // allocate space deviceEvents= (int*) calloc(deviceCount, sizeof(int)); pDevBuffer0 = (CUdeviceptr*) calloc(Streams, sizeof(CUdeviceptr)); pDevBuffer1 = (CUdeviceptr*) calloc(Streams, sizeof(CUdeviceptr)); pHostBuffer = (float **) calloc(Streams, sizeof(float*)); cudaStreams = (cudaStream_t*) calloc(Streams, sizeof(cudaStream_t)); // Set memcpy size based on available device memory RUNTIME_API_CALL(cudaMemGetInfo(&freeMemory, &totalMemory)); printf("Total Device Memory available : "); calculateSize(str, (uint64_t) totalMemory); printf("%s\n", str); bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4); bufferSize = bufferSize/2; printf("Memcpy size is set to %llu B (%llu MB)\n", (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaStreamCreate(&cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Nvlink-topology Records are generated even before cudaMemcpy API is called. CUPTI_CALL(cuptiActivityFlushAll(0)); // fprintf(stderr, "Setup PAPI counters internally (PAPI)\n"); int EventSet = PAPI_NULL; int eventCount; int retval; int k, m, cid=-1; /* PAPI Initialization */ retval = PAPI_library_init(PAPI_VER_CURRENT); if(retval != PAPI_VER_CURRENT) { fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n", retval, PAPI_strerror(retval)); FreeGlobals(); exit(-1); } printf("PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION)); // Find cuda component index. k = PAPI_num_components(); // get number of components. for (i=0; i<k && cid<0; i++) { // while not found, PAPI_component_info_t *aComponent = (PAPI_component_info_t*) PAPI_get_component_info(i); // get the component info. if (aComponent == NULL) { // if we failed, fprintf(stderr, "PAPI_get_component_info(%i) failed, " "returned NULL. %i components reported.\n", i,k); FreeGlobals(); exit(-1); } if (strcmp("cuda", aComponent->name) == 0) cid=i; // If we found our match, record it. } // end search components. 
if (cid < 0) { // if no PCP component found, fprintf(stderr, "Failed to find cuda component among %i " "reported components.\n", k); FreeGlobals(); PAPI_shutdown(); exit(-1); } printf("Found CUDA Component at id %d\n", cid); // Add events at a GPU specific level ... eg cuda:::metric:nvlink_total_data_transmitted:device=0 // Just profile devices to match the CUPTI example eventCount = 0; int eventsRead=0; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Begin enumeration of all events. if (cpuToGpu) printf("Experiment timing memory copy from host to GPU.\n"); if (gpuToGpu) printf("Experiment timing memory copy between GPU 0 and each other GPU.\n"); printf("Events with numeric values were read; if they are zero, they may not \n" "be operational, or the exercises performed by this code do not affect \n" "them. We report all 'nvlink' events presented by the cuda component. \n" "\n" "---------------------------Event Name---------------------------:---Value---\n"); PAPI_event_info_t info; // To get event enumeration info. m=PAPI_NATIVE_MASK; // Get the PAPI NATIVE mask. CALL_PAPI_OK(PAPI_enum_cmp_event(&m,PAPI_ENUM_FIRST,cid)); // Begin enumeration of ALL papi counters. do { // Enumerate all events. memset(&info,0,sizeof(PAPI_event_info_t)); // Clear event info. k=m; // Make a copy of current code. // enumerate sub-events, with masks. For this test, we do not // have any! But we do this to test our enumeration works as // expected. First time through is guaranteed, of course. do { // enumerate masked events. CALL_PAPI_OK(PAPI_get_event_info(k,&info)); // get name of k symbol. if (strstr(info.symbol, "nvlink") == NULL) continue; // skip if not an nvlink event. char *devstr = strstr(info.symbol, "device="); // look for device enumerator. if (devstr == NULL) continue; // Skip if no device present. device=atoi(devstr+7); // Get the device id, for info. // fprintf(stderr, "Found nvlink symbol '%s', device=%i.\n", info.symbol , device); if (device < 0 || device >= deviceCount) continue; // skip any not in range. deviceEvents[device]++; // Add to count of events on this device. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); retval = PAPI_add_named_event(EventSet, info.symbol); // Don't want to fail program if name not found... if(retval == PAPI_OK) { eventCount++; // Bump number of events we could test. } else { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. continue; } long long value=-1; // The only value we read. // ===== Allocate Memory ===================================== if(cpuToGpu) { conductCpuToGpu(EventSet, device, &value); // Just one value for now. } else if(gpuToGpu) { conductGpuToGpu(EventSet, device, &value); // Just one value for now. } addEventsFound(info.symbol, value); // Add to events we were able to read. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. // report each event counted. if (value >= 0) { // If not still -1, eventsRead++; // .. count and report. 
calculateSize(str, value); if (value == 0) { printf("%-64s: %9s (not exercised by current test code.)\n", info.symbol, str); } else { printf("%-64s: %9s\n", info.symbol, str); } } else { printf("%-64s: Failed to read.\n", info.symbol); } } while(PAPI_enum_cmp_event(&k,PAPI_NTV_ENUM_UMASKS,cid)==PAPI_OK); // Get next umask entry (bits different) (should return PAPI_NOEVNT). } while(PAPI_enum_cmp_event(&m,PAPI_ENUM_EVENTS,cid)==PAPI_OK); // Get next event code. if (eventCount < 1) { // If we failed on all of them, fprintf(stderr, "Unable to add any NVLINK events; they are not present in the component.\n"); fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } if (eventsRead < 1) { // If failed to read any, printf("\nFailed to read any nvlink events.\n"); // report a failure. fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } printf("\nTotal nvlink events identified: %i.\n\n", eventsFoundCount); if (eventsFoundCount < 2) { // If failed to get counts on any, printf("Insufficient events are exercised by the current test code to perform pair testing.\n"); // report a failure. FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(0); // exit no matter what. } for (i=0; i<deviceCount; i++) { printf("Device %i has %i events. %i potential pairings per device.\n", i, deviceEvents[i], deviceEvents[i]*(deviceEvents[i]-1)/2); } // Begin pair testing. We consider every possible pairing of events // that, tested alone, returned a value greater than zero. int mainEvent, pairEvent, mainDevice, pairDevice; long long saveValues[2]; long long readValues[2]; int goodOnSame=0, failOnDiff=0, badSameCombo=0, pairProblems=0; // Some counters. int type; // 0 succeed on same device, 1 = fail across devices. for (type=0; type<2; type++) { if (type == 0) { printf("List of Pairings on SAME device:\n"); printf("* means value changed by more than 10%% when paired (vs measured singly, above).\n"); printf("^ means a pair was rejected as an invalid combo.\n"); } else { printf("List of Failed Pairings on DIFFERENT devices:\n"); } for (mainEvent = 0; mainEvent<eventsFoundCount-1; mainEvent++) { // Through all but one events. char *devstr = strstr(eventsFound[mainEvent].name, "device="); // look for device enumerator. mainDevice=atoi(devstr+7); // Get the device id. for (pairEvent = mainEvent+1; pairEvent<eventsFoundCount; pairEvent++) { // Through all possible pairs, devstr = strstr(eventsFound[pairEvent].name, "device="); // look for device enumerator. pairDevice=atoi(devstr+7); // Get the device id. if (type == 0 && mainDevice != pairDevice) continue; // Skip if we need same device. if (type == 1 && mainDevice == pairDevice) continue; // Skip if we need different devices. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); CALL_PAPI_OK(PAPI_add_named_event(EventSet, eventsFound[mainEvent].name)); // Here we must examine the return code. int ret = PAPI_add_named_event(EventSet, eventsFound[pairEvent].name); if (type == 0 && ret == PAPI_ECOMBO) { // A bad combination when looking for valid combos. printf("%c %64s + %-64s [Invalid Combo]\n", '^', // report it. eventsFound[mainEvent].name, eventsFound[pairEvent].name); badSameCombo++; // .. count an explicit rejection. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. done with event set. 
CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (type == 1 && ret == PAPI_ECOMBO) { // A bad combination when we are looking for that. printf("%64s + %-64s BAD COMBINATION ACROSS DEVICES.\n", eventsFound[mainEvent].name, eventsFound[pairEvent].name); // report it. failOnDiff++; // count the bad combos. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. don't need to go further. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (ret != PAPI_OK) { // If it failed for some other reason, fprintf(stderr, "%s:%d Attempt to add event '%s' to set " "with event '%s' produced an unexpected error: " "[%s]. Ignoring this pair.\n", __FILE__, __LINE__, eventsFound[pairEvent], eventsFound[mainEvent], PAPI_strerror(ret)); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. didn't work. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair. In type 1, we just skip it, // because we presume a single event on a device isn't changed // by any event on another device. if (type == 1) { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. worked fine; don't measure it. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair, in type 0, get a measurement. readValues[0]= -1; readValues[1] = -1; if(cpuToGpu) { conductCpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } else if(gpuToGpu) { conductGpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } goodOnSame++; // Was accepted by cuda as a valid pairing. // For the checks, we add 2 (so -1 becomes +1) to avoid any // divide by zeros. It won't make a significant difference // in the ratios. (none if readings are the same). double mainSingle = (2.0 + eventsFound[mainEvent].value); // Get value when read alone. double pairSingle = (2.0 + eventsFound[pairEvent].value); // .. double mainCheck = mainSingle/(2.0 + saveValues[0]); // Get ratio when paired. double pairCheck = pairSingle/(2.0 + saveValues[1]); // .. char flag=' ', flag1=' ', flag2=' '; // Presume all okay. if (mainCheck < 0.90 || mainCheck > 1.10) flag1='*'; // Flag as significantly different for main. if (pairCheck < 0.90 || pairCheck > 1.10) flag2='*'; // Flag as significantly different for pair. if (flag1 == '*' || flag2 == '*') { pairProblems++; // Remember number of problems. flag = '*'; // set global flag. } printf("%c %64s + %-64s [", flag, eventsFound[mainEvent].name, eventsFound[pairEvent].name); calculateSize(str, saveValues[0]); // Do some pretty formatting, printf("%c%9s,", flag1, str); calculateSize(str, saveValues[1]); printf("%c%9s]\n", flag2, str); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. } } // end loop on all events. 
if (type == 0) { // For good pairings on same devices, if (goodOnSame == 0) { printf("NO valid pairings of above events if both on the SAME device.\n"); } else { printf("%i valid pairings of above events if both on the SAME device.\n", goodOnSame); } printf("%i unique pairings on SAME device were rejected as bad combinations.\n", badSameCombo); if (pairProblems > 0) { printf("%i pairings resulted in a change of one or both event values > 10%%.\n", pairProblems); } else { printf("No significant change in event values read for any pairings.\n"); } } else { // Must be reporting bad pairings across devices. if (failOnDiff == 0) printf("NO failed pairings of above events if each on a DIFFERENT device.\n"); else printf("%i failed pairings of above events with each on a DIFFERENT device.\n", failOnDiff); } } // end loop on type. PAPI_shutdown(); // Returns no value. return(0); // exit OK. } // end MAIN.
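conductGpuToGpu above enables peer access between device 0 and its partner unconditionally, so it aborts (via RUNTIME_API_CALL) on machines where the pair is not peer-capable. A minimal sketch of the usual capability check, using the standard cudaDeviceCanAccessPeer query; this guard is an illustration and is not part of the test above:

#include <cuda_runtime.h>

/* Enable bidirectional peer access between device 0 and 'partner' only if the
 * hardware reports both directions are possible; returns 1 on success. */
static int enablePeerPair(int partner)
{
    int can0to1 = 0, can1to0 = 0;
    cudaDeviceCanAccessPeer(&can0to1, 0, partner);
    cudaDeviceCanAccessPeer(&can1to0, partner, 0);
    if (!can0to1 || !can1to0) return 0;

    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(partner, 0);        /* flags must be zero */
    cudaSetDevice(partner);
    cudaDeviceEnablePeerAccess(0, 0);
    return 1;
}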
9b0490db6d1e2d1c36e56da90190aff83c594cf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_l1; int xdim0_update_halo_kernel1_l1_h = -1; __constant__ int xdim1_update_halo_kernel1_l1; int xdim1_update_halo_kernel1_l1_h = -1; __constant__ int xdim2_update_halo_kernel1_l1; int xdim2_update_halo_kernel1_l1_h = -1; __constant__ int xdim3_update_halo_kernel1_l1; int xdim3_update_halo_kernel1_l1_h = -1; __constant__ int xdim4_update_halo_kernel1_l1; int xdim4_update_halo_kernel1_l1_h = -1; __constant__ int xdim5_update_halo_kernel1_l1; int xdim5_update_halo_kernel1_l1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_l1*(y)) #define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_l1*(y)) #define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_l1*(y)) #define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_l1*(y)) #define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_l1*(y)) #define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_l1*(y)) //user function __device__ inline void update_halo_kernel1_l1_gpu(double *density0, double *energy0, double *energy1, double *u, double *p, double *sd , const int* fields) { if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(1,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(1,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(1,0)]; if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(1,0)]; if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(1,0)]; if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(1,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_update_halo_kernel1_l1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_l1; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_l1; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_l1; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_l1; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_l1; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_l1; if (idx_x < size0 && idx_y < size1) { update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { #else void ops_par_loop_update_halo_kernel1_l1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; #endif //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,7,range,54)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(54,"update_halo_kernel1_l1"); OPS_kernels[54].count++; ops_timers_core(&c1,&t1); } 
//compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != xdim0_update_halo_kernel1_l1_h || xdim1 != xdim1_update_halo_kernel1_l1_h || xdim2 != xdim2_update_halo_kernel1_l1_h || xdim3 != xdim3_update_halo_kernel1_l1_h || xdim4 != xdim4_update_halo_kernel1_l1_h || xdim5 != xdim5_update_halo_kernel1_l1_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_l1_h = xdim0; hipMemcpyToSymbol( xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_l1_h = xdim1; hipMemcpyToSymbol( xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_l1_h = xdim2; hipMemcpyToSymbol( xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_l1_h = xdim3; hipMemcpyToSymbol( xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_l1_h = xdim4; hipMemcpyToSymbol( xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_l1_h = xdim5; } int *arg6h = (int *)arg6.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? 
args[5].dat->type_size : args[5].dat->elem_size); char *p_a[7]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[54].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel1_l1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d,x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[54].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[54].mpi_time += t2-t1; OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 54; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 54; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 7; desc->args = 
(ops_arg*)malloc(7*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int)); desc->args[6].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_l1_execute; if (OPS_diags > 1) { ops_timing_realloc(54,"update_halo_kernel1_l1"); } ops_enqueue_kernel(desc); } #endif
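In this .hip wrapper the kernel launch is written with the hipLaunchKernelGGL macro, while the .cu original that follows uses the triple-chevron syntax; the macro's third and fourth arguments carry the dynamic shared-memory size and the stream. A minimal sketch of the two equivalent forms, using a throwaway kernel rather than the generated one:

#include <hip/hip_runtime.h>

__global__ void scale2x(float *dst, const float *src, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = 2.0f * src[i];
}

void launch_scale2x(float *dst, const float *src, int n, hipStream_t stream) {
    dim3 block(256, 1, 1);
    dim3 grid((n - 1) / 256 + 1, 1, 1);
    // CUDA form:  scale2x<<<grid, block, 0, stream>>>(dst, src, n);
    hipLaunchKernelGGL(scale2x, grid, block, 0 /* sharedMemBytes */, stream, dst, src, n);
}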
9b0490db6d1e2d1c36e56da90190aff83c594cf9.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_l1; int xdim0_update_halo_kernel1_l1_h = -1; __constant__ int xdim1_update_halo_kernel1_l1; int xdim1_update_halo_kernel1_l1_h = -1; __constant__ int xdim2_update_halo_kernel1_l1; int xdim2_update_halo_kernel1_l1_h = -1; __constant__ int xdim3_update_halo_kernel1_l1; int xdim3_update_halo_kernel1_l1_h = -1; __constant__ int xdim4_update_halo_kernel1_l1; int xdim4_update_halo_kernel1_l1_h = -1; __constant__ int xdim5_update_halo_kernel1_l1; int xdim5_update_halo_kernel1_l1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_l1*(y)) #define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_l1*(y)) #define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_l1*(y)) #define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_l1*(y)) #define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_l1*(y)) #define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_l1*(y)) //user function __device__ inline void update_halo_kernel1_l1_gpu(double *density0, double *energy0, double *energy1, double *u, double *p, double *sd , const int* fields) { if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(1,0)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(1,0)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(1,0)]; if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(1,0)]; if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(1,0)]; if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(1,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_update_halo_kernel1_l1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_l1; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_l1; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_l1; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_l1; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_l1; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_l1; if (idx_x < size0 && idx_y < size1) { update_halo_kernel1_l1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { #else void ops_par_loop_update_halo_kernel1_l1_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; ops_arg arg5 = desc->args[5]; ops_arg arg6 = desc->args[6]; #endif //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,7,range,54)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(54,"update_halo_kernel1_l1"); OPS_kernels[54].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if 
OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != xdim0_update_halo_kernel1_l1_h || xdim1 != xdim1_update_halo_kernel1_l1_h || xdim2 != xdim2_update_halo_kernel1_l1_h || xdim3 != xdim3_update_halo_kernel1_l1_h || xdim4 != xdim4_update_halo_kernel1_l1_h || xdim5 != xdim5_update_halo_kernel1_l1_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel1_l1, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_l1_h = xdim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel1_l1, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_l1_h = xdim1; cudaMemcpyToSymbol( xdim2_update_halo_kernel1_l1, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_l1_h = xdim2; cudaMemcpyToSymbol( xdim3_update_halo_kernel1_l1, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_l1_h = xdim3; cudaMemcpyToSymbol( xdim4_update_halo_kernel1_l1, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_l1_h = xdim4; cudaMemcpyToSymbol( xdim5_update_halo_kernel1_l1, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_l1_h = xdim5; } int *arg6h = (int *)arg6.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); int dat5 = (OPS_soa ? 
args[5].dat->type_size : args[5].dat->elem_size); char *p_a[7]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); p_a[4] = (char *)args[4].data_d + base4; int base5 = args[5].dat->base_offset + dat5 * 1 * (start[0] * args[5].stencil->stride[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]); p_a[5] = (char *)args[5].data_d + base5; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[54].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_update_halo_kernel1_l1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d,x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[54].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[54].mpi_time += t2-t1; OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[54].transfer += ops_compute_transfer(dim, start, end, &arg5); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel1_l1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 54; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 54; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 7; desc->args = (ops_arg*)malloc(7*sizeof(ops_arg)); 
desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->args[5] = arg5; desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index; desc->args[6] = arg6; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg6.data,NUM_FIELDS*sizeof(int)); desc->args[6].data = tmp; desc->function = ops_par_loop_update_halo_kernel1_l1_execute; if (OPS_diags > 1) { ops_timing_realloc(54,"update_halo_kernel1_l1"); } ops_enqueue_kernel(desc); } #endif
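Both versions of the wrapper size their launch by rounding the iteration range up to whole blocks and then masking the overshoot inside the kernel with an explicit bounds check. A standalone sketch of that ceiling-divide plus guard pattern; the fixed block shape here is a stand-in for the OPS_block_size_x/y globals used above:

#include <cuda_runtime.h>

__global__ void fill2d(double *a, int pitch, int size0, int size1, double v) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < size0 && y < size1)                    // grid may be larger than the range
        a[y * pitch + x] = v;
}

void launch_fill2d(double *a, int pitch, int x_size, int y_size, double v) {
    int bx = 32, by = 4;                           // stand-in for OPS_block_size_x/y
    dim3 block(bx, by, 1);
    dim3 grid((x_size - 1) / bx + 1,               // ceiling divide, as in the wrapper above
              (y_size - 1) / by + 1, 1);
    if (x_size > 0 && y_size > 0)
        fill2d<<<grid, block>>>(a, pitch, x_size, y_size, v);
}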
013ac530a7930e32f911b0980bda12a8511294cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip_runtime.h" #ifndef _KMEANS_CUDA_KERNEL_H_ #define _KMEANS_CUDA_KERNEL_H_ #include <stdio.h> #include <hip/hip_runtime.h> #include "kmeans.h" // FIXME: Make this a runtime selectable variable! #define ASSUMED_NR_CLUSTERS 32 #define SDATA( index) CUT_BANK_CHECKER(sdata, index) #ifdef USE_TEXTURES // t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]... texture<float, 1, hipReadModeElementType> t_features; // t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1] texture<float, 1, hipReadModeElementType> t_features_flipped; texture<float, 1, hipReadModeElementType> t_clusters; #endif #ifdef USE_CONSTANT_BUFFER #ifdef __KALMAR_CC__ // the initialization value 1e-8 is a workaround for a bug in HLC // strangely speaking, the issue would only happen on Fiji, NOT on Kaveri __attribute__((address_space(2))) float c_clusters[ASSUMED_NR_CLUSTERS*34] = { 1e-8 }; /* constant memory for cluster centers */ #else __constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */ #endif #endif /* ----------------- invert_mapping() --------------------- */ /* inverts data array from row-major to column-major. [p0,dim0][p0,dim1][p0,dim2] ... [p1,dim0][p1,dim1][p1,dim2] ... [p2,dim0][p2,dim1][p2,dim2] ... to [dim0,p0][dim0,p1][dim0,p2] ... [dim1,p0][dim1,p1][dim1,p2] ... [dim2,p0][dim2,p1][dim2,p2] ... */ __global__ void invert_mapping(hipLaunchParm lp, float *input, /* original */ float *output, /* inverted */ int npoints, /* npoints */ int nfeatures) /* nfeatures */ { int point_id = hipThreadIdx_x + hipBlockDim_x*hipBlockIdx_x; /* id of thread */ int i; if(point_id < npoints){ for(i=0;i<nfeatures;i++) output[point_id + npoints*i] = input[point_id*nfeatures + i]; } return; } /* ----------------- invert_mapping() end --------------------- */ /* to turn on the GPU delta and center reduction */ //#define GPU_DELTA_REDUCTION //#define GPU_NEW_CENTER_REDUCTION /* ----------------- kmeansPoint() --------------------- */ /* find the index of nearest cluster centers and change membership*/ __global__ void kmeansPoint(hipLaunchParm lp, float *features, /* in: [npoints*nfeatures] */ float *features_flipped, int nfeatures, int npoints, int nclusters, int *membership, float *clusters, float *block_clusters, int *block_deltas) { // block ID const unsigned int block_id = hipGridDim_x*hipBlockIdx_y+hipBlockIdx_x; // point/thread ID const unsigned int point_id = block_id*hipBlockDim_x*hipBlockDim_y + hipThreadIdx_x; int index = -1; if (point_id < npoints) { int i, j; float min_dist = FLT_MAX; float dist; /* distance square between a point to cluster center */ /* find the cluster center id with min distance to pt */ for (i=0; i<nclusters; i++) { int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */ float ans=0.0; /* Euclidean distance sqaure */ for (j=0; j < nfeatures; j++) { int addr = point_id + j*npoints; /* appropriate index of data point */ #ifdef USE_TEXTURES float diff = (tex1Dfetch(t_features,addr) - #else float diff = (features[addr] - #endif #ifdef USE_CONSTANT_BUFFER c_clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */ #else clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */ #endif ans += diff*diff; /* sum of squares */ } dist = ans; /* see if distance is smaller than previous ones: if so, change minimum distance and save index of cluster center */ if (dist < min_dist) { 
min_dist = dist; index = i; } } } #ifdef GPU_DELTA_REDUCTION // count how many points are now closer to a different cluster center __shared__ int deltas[THREADS_PER_BLOCK]; if(hipThreadIdx_x < THREADS_PER_BLOCK) { deltas[hipThreadIdx_x] = 0; } #endif if (point_id < npoints) { #ifdef GPU_DELTA_REDUCTION /* if membership changes, increase delta by 1 */ if (membership[point_id] != index) { deltas[hipThreadIdx_x] = 1; } #endif /* assign the membership to object point_id */ membership[point_id] = index; } #ifdef GPU_DELTA_REDUCTION // make sure all the deltas have finished writing to shared memory idx.barrier.wait(); // now let's count them // primitve reduction follows unsigned int threadids_participating = THREADS_PER_BLOCK / 2; for(;threadids_participating > 1; threadids_participating /= 2) { if(hipThreadIdx_x < threadids_participating) { deltas[hipThreadIdx_x] += deltas[hipThreadIdx_x + threadids_participating]; } idx.barrier.wait(); } if(hipThreadIdx_x < 1) {deltas[hipThreadIdx_x] += deltas[hipThreadIdx_x + 1];} idx.barrier.wait(); // propagate number of changes to global counter if(hipThreadIdx_x == 0) { block_deltas[hipBlockIdx_y * hipGridDim_x + hipBlockIdx_x] = deltas[0]; //printf("original id: %d, modified: %d\n", hipBlockIdx_y*hipGridDim_x+hipBlockIdx_x, hipBlockIdx_x); } #endif #ifdef GPU_NEW_CENTER_REDUCTION int center_id = hipThreadIdx_x / nfeatures; int dim_id = hipThreadIdx_x - nfeatures*center_id; __shared__ int new_center_ids[THREADS_PER_BLOCK]; new_center_ids[hipThreadIdx_x] = index; idx.barrier.wait(); /*** determine which dimension calculte the sum for mapping of threads is center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]... ***/ int new_base_index = (point_id - hipThreadIdx_x)*nfeatures + dim_id; float accumulator = 0.f; if(hipThreadIdx_x < nfeatures * nclusters) { // accumulate over all the elements of this threadblock for(int i = 0; i< (THREADS_PER_BLOCK); i++) { #ifdef USE_TEXTURES float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures); #else float val = features_flipped[new_base_index+i*nfeatures]; #endif if(new_center_ids[i] == center_id) accumulator += val; } // now store the sum for this threadblock /*** mapping to global array is block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]... ***/ block_clusters[(hipBlockIdx_y*hipGridDim_x + hipBlockIdx_x) * nclusters * nfeatures + hipThreadIdx_x] = accumulator; } #endif } #endif // #ifndef _KMEANS_CUDA_KERNEL_H_
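Note that the two optional reduction paths in kmeansPoint above (GPU_DELTA_REDUCTION and GPU_NEW_CENTER_REDUCTION) still synchronize with idx.barrier.wait(), a leftover from the earlier C++AMP/Kalmar port; under plain HIP or CUDA the block-level barrier is __syncthreads(). A minimal sketch of the same per-block delta count written with standard barriers, assuming THREADS_PER_BLOCK is defined by the surrounding headers and is a power of two, as in the original:

// Sketch only -- not the kernel above. Counts, per block, how many points
// changed cluster membership, using a standard shared-memory tree reduction.
__global__ void count_membership_changes(const int *old_membership,
                                         const int *new_membership,
                                         int npoints, int *block_deltas) {
  __shared__ int deltas[THREADS_PER_BLOCK];
  unsigned int tid = threadIdx.x;
  unsigned int point_id = blockIdx.x * blockDim.x + tid;

  // 1 if this point switched clusters, 0 if unchanged or out of range.
  deltas[tid] = (point_id < (unsigned int)npoints &&
                 old_membership[point_id] != new_membership[point_id]) ? 1 : 0;
  __syncthreads();

  // Tree reduction in shared memory (blockDim.x == THREADS_PER_BLOCK).
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tid < s) deltas[tid] += deltas[tid + s];
    __syncthreads();
  }
  if (tid == 0) block_deltas[blockIdx.x] = deltas[0];
}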
013ac530a7930e32f911b0980bda12a8511294cd.cu
#include "hip_runtime.h" #ifndef _KMEANS_CUDA_KERNEL_H_ #define _KMEANS_CUDA_KERNEL_H_ #include <stdio.h> #include <cuda.h> #include "kmeans.h" // FIXME: Make this a runtime selectable variable! #define ASSUMED_NR_CLUSTERS 32 #define SDATA( index) CUT_BANK_CHECKER(sdata, index) #ifdef USE_TEXTURES // t_features has the layout dim0[points 0-m-1]dim1[ points 0-m-1]... texture<float, 1, hipReadModeElementType> t_features; // t_features_flipped has the layout point0[dim 0-n-1]point1[dim 0-n-1] texture<float, 1, hipReadModeElementType> t_features_flipped; texture<float, 1, hipReadModeElementType> t_clusters; #endif #ifdef USE_CONSTANT_BUFFER #ifdef __KALMAR_CC__ // the initialization value 1e-8 is a workaround for a bug in HLC // strangely speaking, the issue would only happen on Fiji, NOT on Kaveri __attribute__((address_space(2))) float c_clusters[ASSUMED_NR_CLUSTERS*34] = { 1e-8 }; /* constant memory for cluster centers */ #else __constant__ float c_clusters[ASSUMED_NR_CLUSTERS*34]; /* constant memory for cluster centers */ #endif #endif /* ----------------- invert_mapping() --------------------- */ /* inverts data array from row-major to column-major. [p0,dim0][p0,dim1][p0,dim2] ... [p1,dim0][p1,dim1][p1,dim2] ... [p2,dim0][p2,dim1][p2,dim2] ... to [dim0,p0][dim0,p1][dim0,p2] ... [dim1,p0][dim1,p1][dim1,p2] ... [dim2,p0][dim2,p1][dim2,p2] ... */ __global__ void invert_mapping(hipLaunchParm lp, float *input, /* original */ float *output, /* inverted */ int npoints, /* npoints */ int nfeatures) /* nfeatures */ { int point_id = hipThreadIdx_x + hipBlockDim_x*hipBlockIdx_x; /* id of thread */ int i; if(point_id < npoints){ for(i=0;i<nfeatures;i++) output[point_id + npoints*i] = input[point_id*nfeatures + i]; } return; } /* ----------------- invert_mapping() end --------------------- */ /* to turn on the GPU delta and center reduction */ //#define GPU_DELTA_REDUCTION //#define GPU_NEW_CENTER_REDUCTION /* ----------------- kmeansPoint() --------------------- */ /* find the index of nearest cluster centers and change membership*/ __global__ void kmeansPoint(hipLaunchParm lp, float *features, /* in: [npoints*nfeatures] */ float *features_flipped, int nfeatures, int npoints, int nclusters, int *membership, float *clusters, float *block_clusters, int *block_deltas) { // block ID const unsigned int block_id = hipGridDim_x*hipBlockIdx_y+hipBlockIdx_x; // point/thread ID const unsigned int point_id = block_id*hipBlockDim_x*hipBlockDim_y + hipThreadIdx_x; int index = -1; if (point_id < npoints) { int i, j; float min_dist = FLT_MAX; float dist; /* distance square between a point to cluster center */ /* find the cluster center id with min distance to pt */ for (i=0; i<nclusters; i++) { int cluster_base_index = i*nfeatures; /* base index of cluster centers for inverted array */ float ans=0.0; /* Euclidean distance sqaure */ for (j=0; j < nfeatures; j++) { int addr = point_id + j*npoints; /* appropriate index of data point */ #ifdef USE_TEXTURES float diff = (tex1Dfetch(t_features,addr) - #else float diff = (features[addr] - #endif #ifdef USE_CONSTANT_BUFFER c_clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */ #else clusters[cluster_base_index + j]); /* distance between a data point to cluster centers */ #endif ans += diff*diff; /* sum of squares */ } dist = ans; /* see if distance is smaller than previous ones: if so, change minimum distance and save index of cluster center */ if (dist < min_dist) { min_dist = dist; index = i; } } } #ifdef GPU_DELTA_REDUCTION // count 
how many points are now closer to a different cluster center __shared__ int deltas[THREADS_PER_BLOCK]; if(hipThreadIdx_x < THREADS_PER_BLOCK) { deltas[hipThreadIdx_x] = 0; } #endif if (point_id < npoints) { #ifdef GPU_DELTA_REDUCTION /* if membership changes, increase delta by 1 */ if (membership[point_id] != index) { deltas[hipThreadIdx_x] = 1; } #endif /* assign the membership to object point_id */ membership[point_id] = index; } #ifdef GPU_DELTA_REDUCTION // make sure all the deltas have finished writing to shared memory idx.barrier.wait(); // now let's count them // primitve reduction follows unsigned int threadids_participating = THREADS_PER_BLOCK / 2; for(;threadids_participating > 1; threadids_participating /= 2) { if(hipThreadIdx_x < threadids_participating) { deltas[hipThreadIdx_x] += deltas[hipThreadIdx_x + threadids_participating]; } idx.barrier.wait(); } if(hipThreadIdx_x < 1) {deltas[hipThreadIdx_x] += deltas[hipThreadIdx_x + 1];} idx.barrier.wait(); // propagate number of changes to global counter if(hipThreadIdx_x == 0) { block_deltas[hipBlockIdx_y * hipGridDim_x + hipBlockIdx_x] = deltas[0]; //printf("original id: %d, modified: %d\n", hipBlockIdx_y*hipGridDim_x+hipBlockIdx_x, hipBlockIdx_x); } #endif #ifdef GPU_NEW_CENTER_REDUCTION int center_id = hipThreadIdx_x / nfeatures; int dim_id = hipThreadIdx_x - nfeatures*center_id; __shared__ int new_center_ids[THREADS_PER_BLOCK]; new_center_ids[hipThreadIdx_x] = index; idx.barrier.wait(); /*** determine which dimension calculte the sum for mapping of threads is center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]... ***/ int new_base_index = (point_id - hipThreadIdx_x)*nfeatures + dim_id; float accumulator = 0.f; if(hipThreadIdx_x < nfeatures * nclusters) { // accumulate over all the elements of this threadblock for(int i = 0; i< (THREADS_PER_BLOCK); i++) { #ifdef USE_TEXTURES float val = tex1Dfetch(t_features_flipped,new_base_index+i*nfeatures); #else float val = features_flipped[new_base_index+i*nfeatures]; #endif if(new_center_ids[i] == center_id) accumulator += val; } // now store the sum for this threadblock /*** mapping to global array is block0[center0[dim0,dim1,dim2,...]center1[dim0,dim1,dim2,...]...]block1[...]... ***/ block_clusters[(hipBlockIdx_y*hipGridDim_x + hipBlockIdx_x) * nclusters * nfeatures + hipThreadIdx_x] = accumulator; } #endif } #endif // #ifndef _KMEANS_CUDA_KERNEL_H_
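The .cu counterpart above carries the same kernel source as the .hip file. The performance-relevant idea in invert_mapping is the storage flip from point-major [point][feature] to dimension-major [feature][point], so that the per-feature loads in kmeansPoint are coalesced across threads. A self-contained sketch of that flip and a typical launch, with the legacy hipLaunchParm parameter dropped and a 256-thread block size assumed (neither detail is taken from the surrounding project):

// Illustrative sketch, not code from the file above.
#include <cuda_runtime.h>

__global__ void flip_rowmajor_to_colmajor(const float *in, float *out,
                                          int npoints, int nfeatures) {
  int p = blockIdx.x * blockDim.x + threadIdx.x;
  if (p < npoints)
    for (int f = 0; f < nfeatures; f++)
      out[p + npoints * f] = in[p * nfeatures + f];  // [p][f] -> [f][p]
}

void flip_features(const float *d_in, float *d_out, int npoints, int nfeatures) {
  int threads = 256;                                 // assumed block size
  int blocks = (npoints + threads - 1) / threads;
  flip_rowmajor_to_colmajor<<<blocks, threads>>>(d_in, d_out, npoints, nfeatures);
  cudaDeviceSynchronize();
}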
f96929d4e25bc5928dfdb91f64e825e6c67b3d33.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/sparse/SparseTensorMath.h> #include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh> #include <ATen/native/sparse/hip/SparseHIPBlas.cuh> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/ExpandUtils.h> #include <THH/THHTensorMathPointwise.cuh> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/binary_search.h> #include <thrust/sort.h> #include <thrust/system/hip/execution_policy.h> #include <bitset> #define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor) #define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor) namespace at { namespace native { using namespace at::sparse; using at::cuda::detail::TensorInfo; using at::cuda::detail::getTensorInfo; // -------------------------------------------------------------------- // Utility functions // -------------------------------------------------------------------- namespace { IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) { IntTensor csr = at::empty({dim+1}, CUDA(kInt)); IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt)); rowIndicesInt.copy_(rowIndices); sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>()); return csr; } } // NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not // wired at all) // -------------------------------------------------------------------- // addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts] // -------------------------------------------------------------------- Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) { TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU"); TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense})); TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor"); TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims"); // no need to check dense_dim because dense_dim + sparse_dim = dim // mxk * kxn = mxn int64_t m = sparse_.size(0); int64_t k = sparse_.size(1); int64_t n = dense.size(1); TORCH_CHECK(t.size(0) == m, "addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0)); TORCH_CHECK(t.size(1) == n, "addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1)); TORCH_CHECK(dense.size(0) == k, "addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0)); r_.resize_({m, n}); SparseTensor sparse = sparse_.coalesce(); int64_t nnz = sparse._nnz(); LongTensor indices = sparse._indices(); Tensor values = sparse._values(); LongTensor rowIndices = indices.select(0, 0); LongTensor colIndices = indices.select(0, 1); IntTensor csr = _to_csr_int(rowIndices, m, nnz); IntTensor colIndicesInt = 
at::empty({colIndices.size(0)}, indices.options().dtype(kInt)); colIndicesInt.copy_(colIndices); // No half support, so we don't have to use CUDATypeConversion Tensor r__; AT_DISPATCH_FLOATING_TYPES( values.scalar_type(), "addmm_sparse_cuda", [&] { scalar_t cast_beta = beta.to<scalar_t>(); scalar_t cast_alpha = alpha.to<scalar_t>(); if (cast_beta == 0) { r_.zero_(); } else if (cast_beta == 1) { if (!is_same_tensor(t, r_)) { r_.copy_(t); } } else { at::mul_out(r_, t, scalar_to_tensor(beta)); } /* r_ */ if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) { r__ = r_; } else { // TODO: how... strange r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous); r__.transpose_(0, 1); } if (nnz > 0) { /* dense */ Tensor dense_; char transpose_dense; if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) { transpose_dense = 'n'; dense_ = dense; } else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) { transpose_dense = 't'; dense_ = dense; } else { transpose_dense = 't'; dense_ = dense.contiguous(); } sparse::cuda::csrmm2( 'n', transpose_dense, m, n, k, nnz, cast_alpha, values.data_ptr<scalar_t>(), csr.data_ptr<int32_t>(), colIndicesInt.data_ptr<int32_t>(), dense_.data_ptr<scalar_t>(), (transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)), cast_beta, r__.data_ptr<scalar_t>(), r__.stride(1)); } }); r_.copy_(r__); return r_; } Tensor& addmm_out_sparse_dense_cuda( Tensor& result, const Tensor& self, const SparseTensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha ) { Tensor b_self; std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out"); return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha); } Tensor s_addmm_sparse_dense_cuda( const Tensor& t, const SparseTensor& sparse, const Tensor& dense, Scalar beta, Scalar alpha ) { Tensor r = at::empty({0}, t.options()); s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha); return r; } Tensor addmm_sparse_dense_cuda( const Tensor& self, const SparseTensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha ) { Tensor b_self; std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out"); return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha); } Tensor& s_addmm_sparse_dense_cuda_( Tensor& t, const SparseTensor& sparse, const Tensor& dense, Scalar beta, Scalar alpha ) { return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha); } // NB: Purposely no broadcasting version of addmm inplace // Deleted sspaddmm (sparse, dense) -> sparse // -------------------------------------------------------------------- // hspmm(SparseTensor mat1, Tensor mat2) // -------------------------------------------------------------------- SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) { TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, sparse_, dense})); TORCH_CHECK(sparse_.sparse_dim() == 2, "hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor"); TORCH_CHECK(sparse_.dense_dim() == 0, "hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values"); TORCH_CHECK(dense.dim() == 2, "hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor"); int64_t m = sparse_.size(0); int64_t k = sparse_.size(1); 
int64_t n = dense.size(1); TORCH_CHECK(dense.size(0) == k, "hspmm: Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0)); get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n}); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); SparseTensor sparse = sparse_.coalesce(); int64_t nnz = sparse._nnz(); LongTensor indices = at::empty({1, nnz}, CUDA(kLong)); // create values in column-major format to avoid copying in spaddmm Tensor values = at::empty({n, nnz}, dense.options()); values.transpose_(0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce SparseTensor newSparse = sparse.clone(); LongTensor spIndices = newSparse._indices(); LongTensor dstIndices = spIndices.select(0, 0); // Save destination indices to output hybrid tensor indices.copy_(dstIndices); // Replace destination indices with 0, 1, 2, 3, ... and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>()); thrust::sequence(policy, indicesIter, indicesIter + nnz); std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec(); new_size[0] = nnz; get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size); s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1); get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values); return r_; } SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) { SparseTensor r = at::empty({0}, sparse.options()); hspmm_out_sparse_cuda(r, sparse, dense); return r; } // -------------------------------------------------------------------- // add(Tensor, SparseTensor, Scalar) // formerly known as spcadd // -------------------------------------------------------------------- Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) { TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(cuda::check_device({sparse, r_, dense})); TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); const int64_t nnz = sparse._nnz(); if (nnz == 0) { r_.resize_as_(dense); r_.copy_(dense); return r_; } auto commonDtype = at::result_type(dense, sparse); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); Tensor r = r_; if (r_.scalar_type() != commonDtype) { r = at::empty_like(dense, r_.options().dtype(commonDtype)); } Tensor dense_buffer = dense.to(commonDtype); Tensor values = sparse._values().to(commonDtype); if (is_same_tensor(r, dense_buffer)) { TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )"); } else { r.resize_as_(dense); r.copy_(dense_buffer); } LongTensor indices = sparse._indices(); 
int64_t nDim = dense.dim(); int64_t nDimI = sparse.sparse_dim(); if (values.numel() == 0) { return r_; } if (sparse.is_coalesced()) { // TODO benchmark to decide whether to remove this special case const dim3 block = cuda::getApplyBlock(); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); if (sparse.dense_dim() == 0) { TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions"); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { hipLaunchKernelGGL(( apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t>) , dim3(grid), dim3(block), 0, stream, TensorCAddOp<scalar_t>(value.to<scalar_t>()), V_INFO(r), I_INFO(indices), V_INFO(values), static_cast<uint64_t>(nnz)); }); }); } else { TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions"); // sparseElementwiseKernel needs values to be contiguous too values = values.contiguous(); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { hipLaunchKernelGGL(( apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t>) , dim3(grid), dim3(block), 0, stream, TensorCAddOp<scalar_t>(value.to<scalar_t>()), V_INFO(r), I_INFO(indices), V_INFO(values), static_cast<uint64_t>(nnz)); }); }); } } else { LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0); // FIXME: at some point we can wrap the scale into indexAdd // NB: Purposely not inplace! AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { if (value.to<scalar_t>() != static_cast<scalar_t>(1)) { values = values.mul(value); } }); }); int64_t view_rows = 1; int64_t view_columns = 1; for (int i = 0; i < nDimI; i++) { view_rows *= r.size(i); } for (int i = nDimI; i < nDim; i++) { view_columns *= r.size(i); } Tensor r_view = r.view({view_rows, view_columns}); values = values.reshape({nnz, view_columns}); r_view.index_add_(0, indices1D, values); } THCudaCheck(hipGetLastError()); r_.copy_(r); return r_; } // -------------------------------------------------------------------- // add(SparseTensor, SparseTensor, Scalar) [broadcasts] // -------------------------------------------------------------------- Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value); SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) { if (!t.is_sparse()) { return add_out_dense_sparse_cuda(r_, t, src, value); } // TODO: This test seems a bit goofy TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. 
Use add(dense, sparse) instead."); TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, t, src})); auto commonDtype = at::result_type(t, src); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes()); if (src._nnz() == 0) { return copy_sparse_to_sparse_(r_, t); } if (t._nnz() == 0) { return mul_out_sparse_scalar(r_, src, value); } TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions"); // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. LongTensor t_indices_ = t._indices(); LongTensor s_indices_ = src._indices(); Tensor t_values_ = t._values().to(commonDtype); Tensor s_values_ = src._values().to(commonDtype); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] { if (value.to<scalar_t>() != static_cast<scalar_t>(1)) { s_values_ = s_values_.mul(value); } }); }); LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1); Tensor r_values_ = at::cat({t_values_, s_values_}, 0); if (r_.scalar_type() != commonDtype) { SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype)); promoted.resize_as_(src); alias_into_sparse(promoted, r_indices_, r_values_); // performs the addition under the common dtype. promoted = promoted.coalesce(); r_values_ = promoted._values().to(r_.scalar_type()); r_indices_ = promoted._indices(); } else { r_.resize_as_(src); } alias_into_sparse(r_, r_indices_, r_values_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. 
// r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } return r_; } // -------------------------------------------------------------------- // mul(SparseTensor, SparseTensor) [broadcasts] // -------------------------------------------------------------------- SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) { if (src_.dim() == 0) { return mul_out_sparse_zerodim(r_, t_, src_); } else if (t_.dim() == 0) { return mul_out_sparse_zerodim(r_, src_, t_); } TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, t_, src_})); TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes()); SparseTensor t = t_.coalesce(); SparseTensor src = src_.coalesce(); if (src_._nnz() == 0 || t_._nnz() == 0) { r_.resize_as_(src_); return r_.zero_(); } // saving those because they can be overwritten when doing in-place operations int64_t t_nnz = t._nnz(), s_nnz = src._nnz(); int64_t max_nnz = ::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped int64_t sparse_dim = src.sparse_dim(); auto commonDtype = at::result_type(t, src); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); LongTensor t_indices_ = t._indices().contiguous(); Tensor t_values_ = t._values().to(commonDtype); LongTensor s_indices_ = src._indices().contiguous(); Tensor s_values_ = src._values().to(commonDtype); LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options()); r_.resize_as_(src); Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_(); int64_t valueSize = t_values_.stride(0); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions"); LongTensor resultNnz = at::empty({1}, CUDA(kLong)); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] { hipLaunchKernelGGL(( apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t>) , dim3(grid), dim3(block), 0, stream, TensorMulOp<scalar_t>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz)); THCudaCheck(hipGetLastError()); hipLaunchKernelGGL(( apply::indexSparseIntersectionKernel<uint64_t, scalar_t>) , dim3(1), dim3(1), 0, stream, I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), // reinterpret_cast shenanigans, because we don't actually have // unsigned tensors... static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr())); THCudaCheck(hipGetLastError()); }); r_values_ = r_values_.to(r_.scalar_type()); get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_); // sync! (surely there is a more idiomatic way to do this...) 
LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong)); cpu_resultNnz.copy_(resultNnz); get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]); return r_._coalesced_(true); } // -------------------------------------------------------------------- // sparse.sum() backward // // see NOTE [ sparse.sum() backward ] // -------------------------------------------------------------------- template <typename scalar_t> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_1(512) #endif __global__ void _sparse_sum_backward_cuda_kernel( int64_t total_threads, const TensorInfo<int64_t, int64_t> grad_indices_ti, const TensorInfo<int64_t, int64_t> input_indices_ti, const TensorInfo<int64_t, int64_t> input_indices_pos_ti, const TensorInfo<scalar_t, int64_t> grad_values_expand_ti, TensorInfo<scalar_t, int64_t> grad_input_values_ti ) { const int64_t i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= total_threads) return; const int64_t j = input_indices_pos_ti.data[i]; bool has_match = false; if (grad_indices_ti.data[j] == input_indices_ti.data[i]) { has_match = true; } int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0]; int64_t out_start = i * grad_input_values_stride0; int64_t out_end = (i + 1) * grad_input_values_stride0; int64_t in_start = j * grad_values_expand_ti.strides[0]; if (has_match) { for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) { grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i]; } } else { for (int64_t out_i = out_start; out_i < out_end; out_i++) { grad_input_values_ti.data[out_i] = scalar_t(0); } } } Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) { TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor"); TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor"); auto input = input_.coalesce(); const int64_t input_dim = input.dim(); auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim); auto dims_to_sum_v = dims_to_sum.vec(); maybe_wrap_dims(dims_to_sum_v, input_dim); LongTensor input_indices = input._indices(); Tensor input_values = input._values(); IntArrayRef input_sizes = input.sizes(); const int64_t input_sparse_dim = input.sparse_dim(); const int64_t input_dense_dim = input.dense_dim(); const int64_t input_nnz = input._nnz(); int64_t sparse_dims_to_sum_size = 0; auto sparse_dims_to_keep_v = std::vector<int64_t>(); auto dense_dims_to_sum_v = std::vector<int64_t>(); for (int64_t d = 0; d < input_dim; d++) { if (dims_to_sum_b[d]) { if (d < input_sparse_dim) sparse_dims_to_sum_size ++; else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim); } else { if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d); } } const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size); const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0); const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0); if (sum_all_sparse_dim) { TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed"); auto grad_input_values = grad_; auto expand_size = input_values.sizes().vec(); if (sum_dense_dim) { auto dense_expand_size = std::vector<int64_t>(expand_size); dense_expand_size.erase(dense_expand_size.begin()); // remove nnz dim for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1); // -1 since grad has no nnz 
dim grad_input_values = grad_input_values.expand(dense_expand_size); } grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous); return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype())); // convert to grad dtype } else { TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense"); auto grad = grad_.coalesce(); LongTensor grad_indices = grad._indices(); Tensor grad_values = grad._values(); const int64_t grad_sparse_dim = grad.sparse_dim(); const int64_t grad_nnz = grad._nnz(); Tensor grad_values_expand = grad_values; if (sum_dense_dim) { auto expand_size = input_values.sizes().vec(); if (sum_sparse_dim) expand_size[0] = grad_values.size(0); // update nnz for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d); grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous); } Tensor grad_input_values; if (!sum_sparse_dim) { grad_input_values = grad_values_expand; } else { int curDevice = -1; hipGetDevice(&curDevice); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(curDevice); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); typedef thrust::device_ptr<int64_t> thrust_ptr; grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT); AT_ASSERT(grad_input_values.is_cuda()); // get 1D indices auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim); std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0); auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v); // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v); thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>()); thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>()); // store lower_bound of input indices at grad indices LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT); thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>()); thrust::lower_bound(policy, grad_indices_iter, grad_indices_iter + grad_nnz, input_indices_iter, input_indices_iter + input_nnz, input_indices_pos_iter); // config to run cuda kernel int64_t total_threads = input_nnz; const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads)); dim3 grid; TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions"); auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D); auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D); auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos); AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] { auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand); auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values); hipLaunchKernelGGL(( _sparse_sum_backward_cuda_kernel<scalar_t>), dim3(grid), dim3(block), 0, stream, total_threads, grad_indices_ti, input_indices_ti, 
input_indices_pos_ti, grad_values_expand_ti, grad_input_values_ti ); }); } return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options()); } } }} // namespace at::native
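In s_addmm_out_sparse_dense_cuda above, the coalesced COO row indices are converted into a CSR row-pointer array by sparse::cuda::Xcoo2csr before csrmm2 is invoked. A host-side reference of what that conversion produces, for illustration only — the real path runs on the GPU via the sparse BLAS backend and relies on coalesce() having already sorted the indices so that values and column indices line up with the row pointers:

// Host-side reference for the COO -> CSR row-pointer conversion; sketch only.
#include <vector>

std::vector<int> coo_rows_to_csr(const std::vector<int>& rowIndices, int m) {
  std::vector<int> csr(m + 1, 0);
  for (int r : rowIndices) csr[r + 1]++;               // count nnz per row
  for (int i = 0; i < m; i++) csr[i + 1] += csr[i];    // prefix sum -> row pointers
  return csr;  // entries of row i live in [csr[i], csr[i+1])
}
// e.g. rowIndices = {0, 0, 1, 3}, m = 4  ->  csr = {0, 2, 3, 3, 4}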
f96929d4e25bc5928dfdb91f64e825e6c67b3d33.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/SparseTensorUtils.h> #include <ATen/native/sparse/SparseTensorMath.h> #include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh> #include <ATen/native/sparse/cuda/SparseCUDABlas.cuh> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/ExpandUtils.h> #include <THC/THCTensorMathPointwise.cuh> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/binary_search.h> #include <thrust/sort.h> #include <thrust/system/cuda/execution_policy.h> #include <bitset> #define I_INFO(tensor) cuda::detail::getTensorInfo<int64_t, uint64_t>(tensor) #define V_INFO(tensor) cuda::detail::getTensorInfo<scalar_t, uint64_t>(tensor) namespace at { namespace native { using namespace at::sparse; using at::cuda::detail::TensorInfo; using at::cuda::detail::getTensorInfo; // -------------------------------------------------------------------- // Utility functions // -------------------------------------------------------------------- namespace { IntTensor _to_csr_int(const LongTensor& rowIndices, int64_t dim, int64_t nnz) { IntTensor csr = at::empty({dim+1}, CUDA(kInt)); IntTensor rowIndicesInt = at::empty({rowIndices.size(0)}, CUDA(kInt)); rowIndicesInt.copy_(rowIndices); sparse::cuda::Xcoo2csr(rowIndicesInt.data_ptr<int32_t>(), nnz, dim, csr.data_ptr<int32_t>()); return csr; } } // NB: Deleted spaddcmul (aka addcmul_, but not actually wired up), spaddcdiv (not // wired at all) // -------------------------------------------------------------------- // addmm(Tensor, SparseTensor, Tensor, Scalar, Scalar) [broadcasts] // -------------------------------------------------------------------- Tensor& s_addmm_out_sparse_dense_cuda(Tensor& r_, const Tensor& t, const SparseTensor& sparse_, const Tensor& dense, Scalar beta, Scalar alpha) { TORCH_CHECK(t.is_cuda(), "addmm: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "addmm: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(sparse_.is_cuda(), "addmm: expected 'mat1' to be CUDA, but got CPU"); TORCH_CHECK(dense.is_cuda(), "addmm: expected 'mat2' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({sparse_, r_, t, dense})); TORCH_CHECK(dense.dim() == 2, "addmm: 2D tensor expected, got ", dense.dim(), "D tensor"); TORCH_CHECK(sparse_.sparse_dim() == 2, "addmm: expected first two dims to be sparse (indices has size 2 at first dim), but got ", sparse_.sparse_dim(), " sparse dims"); // no need to check dense_dim because dense_dim + sparse_dim = dim // mxk * kxn = mxn int64_t m = sparse_.size(0); int64_t k = sparse_.size(1); int64_t n = dense.size(1); TORCH_CHECK(t.size(0) == m, "addmm: Argument #1 (t): Expected dim 0 size ", m, ", got ", t.size(0)); TORCH_CHECK(t.size(1) == n, "addmm: Argument #1 (t): Expected dim 1 size ", n, ", got ", t.size(1)); TORCH_CHECK(dense.size(0) == k, "addmm: Argument #3 (dense): Expected dim 0 size ", k, ", got ", dense.size(0)); r_.resize_({m, n}); SparseTensor sparse = sparse_.coalesce(); int64_t nnz = sparse._nnz(); LongTensor indices = sparse._indices(); Tensor values = sparse._values(); LongTensor rowIndices = indices.select(0, 0); LongTensor colIndices = indices.select(0, 1); IntTensor csr = _to_csr_int(rowIndices, m, nnz); IntTensor colIndicesInt = at::empty({colIndices.size(0)}, indices.options().dtype(kInt)); 
colIndicesInt.copy_(colIndices); // No half support, so we don't have to use CUDATypeConversion Tensor r__; AT_DISPATCH_FLOATING_TYPES( values.scalar_type(), "addmm_sparse_cuda", [&] { scalar_t cast_beta = beta.to<scalar_t>(); scalar_t cast_alpha = alpha.to<scalar_t>(); if (cast_beta == 0) { r_.zero_(); } else if (cast_beta == 1) { if (!is_same_tensor(t, r_)) { r_.copy_(t); } } else { at::mul_out(r_, t, scalar_to_tensor(beta)); } /* r_ */ if(r_.stride(0) == 1 && r_.stride(1) == r_.size(0)) { r__ = r_; } else { // TODO: how... strange r__ = r_.transpose(0, 1).clone(at::MemoryFormat::Contiguous); r__.transpose_(0, 1); } if (nnz > 0) { /* dense */ Tensor dense_; char transpose_dense; if(dense.stride(0) == 1 && dense.stride(1) == dense.size(0)) { transpose_dense = 'n'; dense_ = dense; } else if(dense.stride(1) == 1 && dense.stride(0) != dense.size(1)) { transpose_dense = 't'; dense_ = dense; } else { transpose_dense = 't'; dense_ = dense.contiguous(); } sparse::cuda::csrmm2( 'n', transpose_dense, m, n, k, nnz, cast_alpha, values.data_ptr<scalar_t>(), csr.data_ptr<int32_t>(), colIndicesInt.data_ptr<int32_t>(), dense_.data_ptr<scalar_t>(), (transpose_dense == 'n' ? dense_.stride(1) : dense_.stride(0)), cast_beta, r__.data_ptr<scalar_t>(), r__.stride(1)); } }); r_.copy_(r__); return r_; } Tensor& addmm_out_sparse_dense_cuda( Tensor& result, const Tensor& self, const SparseTensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha ) { Tensor b_self; std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out"); return s_addmm_out_sparse_dense_cuda(result, b_self, mat1, mat2, beta, alpha); } Tensor s_addmm_sparse_dense_cuda( const Tensor& t, const SparseTensor& sparse, const Tensor& dense, Scalar beta, Scalar alpha ) { Tensor r = at::empty({0}, t.options()); s_addmm_out_sparse_dense_cuda(r, t, sparse, dense, beta, alpha); return r; } Tensor addmm_sparse_dense_cuda( const Tensor& self, const SparseTensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha ) { Tensor b_self; std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out"); return s_addmm_sparse_dense_cuda(b_self, mat1, mat2, beta, alpha); } Tensor& s_addmm_sparse_dense_cuda_( Tensor& t, const SparseTensor& sparse, const Tensor& dense, Scalar beta, Scalar alpha ) { return s_addmm_out_sparse_dense_cuda(t, t, sparse, dense, beta, alpha); } // NB: Purposely no broadcasting version of addmm inplace // Deleted sspaddmm (sparse, dense) -> sparse // -------------------------------------------------------------------- // hspmm(SparseTensor mat1, Tensor mat2) // -------------------------------------------------------------------- SparseTensor& hspmm_out_sparse_cuda(SparseTensor& r_, const SparseTensor& sparse_, const Tensor& dense/* , Scalar alpha */) { TORCH_CHECK(sparse_.is_cuda(), "hspmm: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "hspmm: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(dense.is_cuda(), "hspmm: expected 'mat2' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, sparse_, dense})); TORCH_CHECK(sparse_.sparse_dim() == 2, "hspmm: Argument #2: 2D tensor expected, got ", sparse_.sparse_dim(), "D tensor"); TORCH_CHECK(sparse_.dense_dim() == 0, "hspmm: Argument #2: scalar values expected, got ", sparse_.dense_dim(), "D values"); TORCH_CHECK(dense.dim() == 2, "hspmm: Argument #3: 2D tensor expected, got ", dense.dim(), "D tensor"); int64_t m = sparse_.size(0); int64_t k = sparse_.size(1); int64_t n = dense.size(1); TORCH_CHECK(dense.size(0) == k, "hspmm: 
Argument #3: Expected dim 0 size ", k, ", got ", dense.size(0)); get_sparse_impl(r_)->resize_and_clear_(1, 1, {m, n}); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); SparseTensor sparse = sparse_.coalesce(); int64_t nnz = sparse._nnz(); LongTensor indices = at::empty({1, nnz}, CUDA(kLong)); // create values in column-major format to avoid copying in spaddmm Tensor values = at::empty({n, nnz}, dense.options()); values.transpose_(0, 1); // why does sparse need to be cloned? If this is really necessary maybe we // need to fuse this with newCoalesce SparseTensor newSparse = sparse.clone(); LongTensor spIndices = newSparse._indices(); LongTensor dstIndices = spIndices.select(0, 0); // Save destination indices to output hybrid tensor indices.copy_(dstIndices); // Replace destination indices with 0, 1, 2, 3, ... and compute output values // tensor with sparse * dense multiplication thrust::device_ptr<int64_t> indicesIter(dstIndices.data_ptr<int64_t>()); thrust::sequence(policy, indicesIter, indicesIter + nnz); std::vector<int64_t> new_size = get_sparse_impl(newSparse)->sizes().vec(); new_size[0] = nnz; get_sparse_impl(newSparse)->raw_resize_(get_sparse_impl(newSparse)->sparse_dim(), get_sparse_impl(newSparse)->dense_dim(), new_size); s_addmm_out_sparse_dense_cuda(values, values, newSparse, dense, 0, /*alpha*/ 1); get_sparse_impl(r_)->set_indices_and_values_unsafe(indices, values); return r_; } SparseTensor hspmm_sparse_cuda(const SparseTensor& sparse, const Tensor& dense) { SparseTensor r = at::empty({0}, sparse.options()); hspmm_out_sparse_cuda(r, sparse, dense); return r; } // -------------------------------------------------------------------- // add(Tensor, SparseTensor, Scalar) // formerly known as spcadd // -------------------------------------------------------------------- Tensor& add_out_dense_sparse_cuda(Tensor& r_, const Tensor& dense, const SparseTensor& sparse, at::Scalar value) { TORCH_CHECK(dense.is_cuda(), "add: expected 'self' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(sparse.is_cuda(), "add: expected 'other' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be a CUDA tensor, but got a CPU tensor"); TORCH_CHECK(cuda::check_device({sparse, r_, dense})); TORCH_CHECK(dense.sizes().equals(sparse.sizes()), "add: expected 'self' and 'other' to have same size, but self has size ", dense.sizes(), " while other has size ", sparse.sizes(), " (FYI: dense-sparse addition does not currently support broadcasting)"); const int64_t nnz = sparse._nnz(); if (nnz == 0) { r_.resize_as_(dense); r_.copy_(dense); return r_; } auto commonDtype = at::result_type(dense, sparse); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); Tensor r = r_; if (r_.scalar_type() != commonDtype) { r = at::empty_like(dense, r_.options().dtype(commonDtype)); } Tensor dense_buffer = dense.to(commonDtype); Tensor values = sparse._values().to(commonDtype); if (is_same_tensor(r, dense_buffer)) { TORCH_CHECK(r_.is_contiguous(), "add: CUDA dense-sparse addition with a non-contiguous output tensor does not work; shout if you need it (see https://github.com/pytorch/pytorch/issues/1521 )"); } else { r.resize_as_(dense); r.copy_(dense_buffer); } LongTensor indices = sparse._indices(); int64_t nDim = dense.dim(); int64_t nDimI = sparse.sparse_dim(); if (values.numel() 
== 0) { return r_; } if (sparse.is_coalesced()) { // TODO benchmark to decide whether to remove this special case const dim3 block = cuda::getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); if (sparse.dense_dim() == 0) { TORCH_CHECK(cuda::getApplyGrid(nnz, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions"); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { apply::sparseElementwiseKernelScalar<TensorCAddOp<scalar_t>, uint64_t, scalar_t> <<<grid, block, 0, stream>>>( TensorCAddOp<scalar_t>(value.to<scalar_t>()), V_INFO(r), I_INFO(indices), V_INFO(values), static_cast<uint64_t>(nnz)); }); }); } else { TORCH_CHECK(cuda::getApplyGrid(nnz * block.x, grid, curDevice), "add: Argument #0: tensor too large or too many dimensions"); // sparseElementwiseKernel needs values to be contiguous too values = values.contiguous(); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { apply::sparseElementwiseKernel<TensorCAddOp<scalar_t>, uint64_t, scalar_t> <<<grid, block, 0, stream>>>( TensorCAddOp<scalar_t>(value.to<scalar_t>()), V_INFO(r), I_INFO(indices), V_INFO(values), static_cast<uint64_t>(nnz)); }); }); } } else { LongTensor indices1D = flatten_indices(indices, sparse.sizes(), 0); // FIXME: at some point we can wrap the scale into indexAdd // NB: Purposely not inplace! AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_dense_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_dense_sparse_cuda", [&] { if (value.to<scalar_t>() != static_cast<scalar_t>(1)) { values = values.mul(value); } }); }); int64_t view_rows = 1; int64_t view_columns = 1; for (int i = 0; i < nDimI; i++) { view_rows *= r.size(i); } for (int i = nDimI; i < nDim; i++) { view_columns *= r.size(i); } Tensor r_view = r.view({view_rows, view_columns}); values = values.reshape({nnz, view_columns}); r_view.index_add_(0, indices1D, values); } THCudaCheck(cudaGetLastError()); r_.copy_(r); return r_; } // -------------------------------------------------------------------- // add(SparseTensor, SparseTensor, Scalar) [broadcasts] // -------------------------------------------------------------------- Tensor& add_out_dense_sparse_cuda(Tensor& r, const Tensor& dense, const SparseTensor& sparse_, Scalar value); SparseTensor& add_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t, const SparseTensor& src, Scalar value) { if (!t.is_sparse()) { return add_out_dense_sparse_cuda(r_, t, src, value); } // TODO: This test seems a bit goofy TORCH_CHECK(src.is_sparse(), "add(sparse, dense) is not supported. 
Use add(dense, sparse) instead."); TORCH_CHECK(t.is_cuda(), "add: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(src.is_cuda(), "add: expected 'other' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "add: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, t, src})); auto commonDtype = at::result_type(t, src); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); TORCH_CHECK(t.sizes().equals(src.sizes()), "add: expected 'self' and 'other' to have same size, but ", t.sizes(), " != ", src.sizes()); if (src._nnz() == 0) { return copy_sparse_to_sparse_(r_, t); } if (t._nnz() == 0) { return mul_out_sparse_scalar(r_, src, value); } TORCH_CHECK(is_same_density(t, src), "add: expected 'self' and 'other' to have same density, but 'self' has ", t.sparse_dim(), " sparse dimensions while 'other' has ", src.sparse_dim(), " sparse dimensions"); // We deliberately choose to simply concat the indices and values tensors // rather than merging them. This removes the need to synchronously fetch nnz // at the end of the operation, at the cost of having a non-coalesced result. // This trade-off is preferable for the common use-case of gradient accumulation. LongTensor t_indices_ = t._indices(); LongTensor s_indices_ = src._indices(); Tensor t_values_ = t._values().to(commonDtype); Tensor s_values_ = src._values().to(commonDtype); AT_DISPATCH_ALL_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, commonDtype, "add_out_sparse_cuda", [&] { AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "add_out_sparse_cuda", [&] { if (value.to<scalar_t>() != static_cast<scalar_t>(1)) { s_values_ = s_values_.mul(value); } }); }); LongTensor r_indices_ = at::cat({t_indices_, s_indices_}, 1); Tensor r_values_ = at::cat({t_values_, s_values_}, 0); if (r_.scalar_type() != commonDtype) { SparseTensor promoted = at::empty({0}, r_.options().dtype(commonDtype)); promoted.resize_as_(src); alias_into_sparse(promoted, r_indices_, r_values_); // performs the addition under the common dtype. promoted = promoted.coalesce(); r_values_ = promoted._values().to(r_.scalar_type()); r_indices_ = promoted._indices(); } else { r_.resize_as_(src); } alias_into_sparse(r_, r_indices_, r_values_); // FIXME: add some heuristic about when to call coalesce() here, so that // tensors don't totally blow up in size by concatenation; e.g. 
// r->minUnique = max(a->minUnique + b->minUnique); // if (r->nnz / r->minUnique > COMPACTION_THRESHOLD) { // THCSTensor_(contiguous)(r); // r->minUnique = r->nnz; // } return r_; } // -------------------------------------------------------------------- // mul(SparseTensor, SparseTensor) [broadcasts] // -------------------------------------------------------------------- SparseTensor& mul_out_sparse_cuda(SparseTensor& r_, const SparseTensor& t_, const SparseTensor& src_) { if (src_.dim() == 0) { return mul_out_sparse_zerodim(r_, t_, src_); } else if (t_.dim() == 0) { return mul_out_sparse_zerodim(r_, src_, t_); } TORCH_CHECK(t_.is_cuda(), "mul: expected 'self' to be CUDA, but got CPU"); TORCH_CHECK(src_.is_cuda(), "mul: expected 'other' to be CUDA, but got CPU"); TORCH_CHECK(r_.is_cuda(), "mul: expected 'out' to be CUDA, but got CPU"); TORCH_CHECK(cuda::check_device({r_, t_, src_})); TORCH_CHECK(t_.sizes().equals(src_.sizes()), "mul: expected 'self' and 'other' to have same size, but ", t_.sizes(), " != ", src_.sizes()); SparseTensor t = t_.coalesce(); SparseTensor src = src_.coalesce(); if (src_._nnz() == 0 || t_._nnz() == 0) { r_.resize_as_(src_); return r_.zero_(); } // saving those because they can be overwritten when doing in-place operations int64_t t_nnz = t._nnz(), s_nnz = src._nnz(); int64_t max_nnz = std::min(t_nnz, s_nnz); // multiply by zero is zero, and can be dropped int64_t sparse_dim = src.sparse_dim(); auto commonDtype = at::result_type(t, src); TORCH_CHECK(canCast(commonDtype, r_.scalar_type()), "Can't convert result type ", commonDtype, " to output ", r_.scalar_type()); LongTensor t_indices_ = t._indices().contiguous(); Tensor t_values_ = t._values().to(commonDtype); LongTensor s_indices_ = src._indices().contiguous(); Tensor s_values_ = src._values().to(commonDtype); LongTensor r_indices_ = at::empty({sparse_dim, max_nnz}, t_indices_.options()); r_.resize_as_(src); Tensor r_values_ = new_values_with_size_of(t_values_, max_nnz).zero_(); int64_t valueSize = t_values_.stride(0); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), valueSize)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); TORCH_CHECK(cuda::getApplyGrid(valueSize, grid, curDevice), "mul: Argument #0: tensor too large or too many dimensions"); LongTensor resultNnz = at::empty({1}, CUDA(kLong)); AT_DISPATCH_ALL_TYPES_AND( at::ScalarType::Half, commonDtype, "mul_out_sparse_cuda", [&] { apply::valueSparseIntersectionKernel<TensorMulOp<scalar_t>, uint64_t, scalar_t> <<<grid, block, 0, stream>>>( TensorMulOp<scalar_t>(), I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), V_INFO(r_values_), V_INFO(t_values_), V_INFO(s_values_), static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz)); THCudaCheck(cudaGetLastError()); apply::indexSparseIntersectionKernel<uint64_t, scalar_t> <<<1, 1, 0, stream>>>( I_INFO(r_indices_), I_INFO(t_indices_), I_INFO(s_indices_), // reinterpret_cast shenanigans, because we don't actually have // unsigned tensors... static_cast<uint64_t>(t_nnz), static_cast<uint64_t>(s_nnz), reinterpret_cast<uint64_t*>(resultNnz.data_ptr())); THCudaCheck(cudaGetLastError()); }); r_values_ = r_values_.to(r_.scalar_type()); get_sparse_impl(r_)->set_indices_and_values_unsafe(r_indices_, r_values_); // sync! (surely there is a more idiomatic way to do this...) 
  LongTensor cpu_resultNnz = at::empty({1}, CPU(kLong));
  cpu_resultNnz.copy_(resultNnz);
  get_sparse_impl(r_)->set_nnz_and_narrow(cpu_resultNnz.accessor<int64_t, 1>()[0]);

  return r_._coalesced_(true);
}

// --------------------------------------------------------------------
// sparse.sum() backward
//
// see NOTE [ sparse.sum() backward ]
// --------------------------------------------------------------------
template <typename scalar_t>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(512)
#endif
__global__ void _sparse_sum_backward_cuda_kernel(
    int64_t total_threads,
    const TensorInfo<int64_t, int64_t> grad_indices_ti,
    const TensorInfo<int64_t, int64_t> input_indices_ti,
    const TensorInfo<int64_t, int64_t> input_indices_pos_ti,
    const TensorInfo<scalar_t, int64_t> grad_values_expand_ti,
    TensorInfo<scalar_t, int64_t> grad_input_values_ti
) {
  const int64_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= total_threads) return;
  const int64_t j = input_indices_pos_ti.data[i];

  bool has_match = false;
  if (grad_indices_ti.data[j] == input_indices_ti.data[i]) {
    has_match = true;
  }

  int64_t grad_input_values_stride0 = grad_input_values_ti.strides[0];
  int64_t out_start = i * grad_input_values_stride0;
  int64_t out_end = (i + 1) * grad_input_values_stride0;
  int64_t in_start = j * grad_values_expand_ti.strides[0];

  if (has_match) {
    for (int64_t out_i = out_start, in_i = in_start; out_i < out_end; out_i++, in_i++) {
      grad_input_values_ti.data[out_i] = grad_values_expand_ti.data[in_i];
    }
  } else {
    for (int64_t out_i = out_start; out_i < out_end; out_i++) {
      grad_input_values_ti.data[out_i] = scalar_t(0);
    }
  }
}

Tensor _sparse_sum_backward_cuda(const Tensor& grad_, const SparseTensor& input_, IntArrayRef dims_to_sum) {
  TORCH_CHECK(grad_.is_cuda(), "_sparse_sum_backward_cuda: expected 'grad_' to be CUDA tensor, but got CPU tensor");
  TORCH_CHECK(input_.is_cuda(), "_sparse_sum_backward_cuda: expected 'input_' to be CUDA tensor, but got CPU tensor");

  auto input = input_.coalesce();
  const int64_t input_dim = input.dim();
  auto dims_to_sum_b = dim_list_to_bitset(dims_to_sum, input_dim);
  auto dims_to_sum_v = dims_to_sum.vec();
  maybe_wrap_dims(dims_to_sum_v, input_dim);

  LongTensor input_indices = input._indices();
  Tensor input_values = input._values();
  IntArrayRef input_sizes = input.sizes();
  const int64_t input_sparse_dim = input.sparse_dim();
  const int64_t input_dense_dim = input.dense_dim();
  const int64_t input_nnz = input._nnz();

  int64_t sparse_dims_to_sum_size = 0;
  auto sparse_dims_to_keep_v = std::vector<int64_t>();
  auto dense_dims_to_sum_v = std::vector<int64_t>();
  for (int64_t d = 0; d < input_dim; d++) {
    if (dims_to_sum_b[d]) {
      if (d < input_sparse_dim) sparse_dims_to_sum_size ++;
      else dense_dims_to_sum_v.emplace_back(d + 1 - input_sparse_dim);
    } else {
      if (d < input_sparse_dim) sparse_dims_to_keep_v.emplace_back(d);
    }
  }

  const bool sum_all_sparse_dim = (input_sparse_dim == sparse_dims_to_sum_size);
  const bool sum_dense_dim = (dense_dims_to_sum_v.size() > 0);
  const bool sum_sparse_dim = (sparse_dims_to_sum_size > 0);

  if (sum_all_sparse_dim) {
    TORCH_CHECK(!grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad Tensor to be dense since all sparse dims are summed");
    auto grad_input_values = grad_;
    auto expand_size = input_values.sizes().vec();
    if (sum_dense_dim) {
      auto dense_expand_size = std::vector<int64_t>(expand_size);
      dense_expand_size.erase(dense_expand_size.begin());  // remove nnz dim
      for (auto d : dense_dims_to_sum_v) grad_input_values = grad_input_values.unsqueeze(d - 1);  // -1 since grad has no nnz dim
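      // Shape walk-through (illustrative assumption, not taken from the original
      // comments): for input_values of shape [nnz, d0, d1] with every sparse dim and
      // the last dense dim summed, grad_ arrives with shape [d0]; unsqueezing at
      // index (d - 1) == 1 gives [d0, 1], the expand below broadcasts it to [d0, d1],
      // and the final expand over expand_size yields [nnz, d0, d1].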
      grad_input_values = grad_input_values.expand(dense_expand_size);
    }
    grad_input_values = grad_input_values.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, input.options().dtype(grad_.dtype()));  // convert to grad dtype
  } else {
    TORCH_CHECK(grad_.is_sparse(), "_sparse_sum_backward_cuda: expected grad_ Tensor to be sparse, but got dense");
    auto grad = grad_.coalesce();
    LongTensor grad_indices = grad._indices();
    Tensor grad_values = grad._values();
    const int64_t grad_sparse_dim = grad.sparse_dim();
    const int64_t grad_nnz = grad._nnz();

    Tensor grad_values_expand = grad_values;
    if (sum_dense_dim) {
      auto expand_size = input_values.sizes().vec();
      if (sum_sparse_dim) expand_size[0] = grad_values.size(0);  // update nnz
      for (auto d : dense_dims_to_sum_v) grad_values_expand = grad_values_expand.unsqueeze(d);
      grad_values_expand = grad_values_expand.expand(expand_size).clone(at::MemoryFormat::Contiguous);
    }

    Tensor grad_input_values;
    if (!sum_sparse_dim) {
      grad_input_values = grad_values_expand;
    } else {
      int curDevice = -1;
      cudaGetDevice(&curDevice);
      cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
      auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
      auto policy = thrust::cuda::par(allocator).on(stream);
      typedef thrust::device_ptr<int64_t> thrust_ptr;

      grad_input_values = at::empty_like(input_values, grad_values.options(), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      AT_ASSERT(grad_input_values.is_cuda());

      // get 1D indices
      auto grad_sparse_dim_to_keep_v = std::vector<int64_t>(grad_sparse_dim);
      std::iota(grad_sparse_dim_to_keep_v.begin(), grad_sparse_dim_to_keep_v.end(), 0);

      auto grad_indices_1D = flatten_indices_by_dims(grad_indices, grad.sizes(), grad_sparse_dim_to_keep_v);  // flatten indices on all sparse_dim of grad, output indices is coalesced and sorted
      auto input_indices_1D = flatten_indices_by_dims(input_indices, input_sizes, sparse_dims_to_keep_v);

      thrust_ptr grad_indices_iter(grad_indices_1D.data_ptr<int64_t>());
      thrust_ptr input_indices_iter(input_indices_1D.data_ptr<int64_t>());

      // store lower_bound of input indices at grad indices
      LongTensor input_indices_pos = at::empty_like(input_indices_1D, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
      thrust_ptr input_indices_pos_iter(input_indices_pos.data_ptr<int64_t>());
      thrust::lower_bound(policy,
                          grad_indices_iter, grad_indices_iter + grad_nnz,
                          input_indices_iter, input_indices_iter + input_nnz,
                          input_indices_pos_iter);

      // config to run cuda kernel
      int64_t total_threads = input_nnz;
      const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), total_threads));
      dim3 grid;
      TORCH_CHECK(cuda::getApplyGrid(total_threads, grid, curDevice), "_sparse_sum_backward_cuda: input too large or too many dimensions");

      auto grad_indices_ti = getTensorInfo<int64_t, int64_t>(grad_indices_1D);
      auto input_indices_ti = getTensorInfo<int64_t, int64_t>(input_indices_1D);
      auto input_indices_pos_ti = getTensorInfo<int64_t, int64_t>(input_indices_pos);

      AT_DISPATCH_FLOATING_TYPES_AND_HALF(grad_values.scalar_type(), "_sparse_sum_backward_cuda", [&] {
        auto grad_values_expand_ti = getTensorInfo<scalar_t, int64_t>(grad_values_expand);
        auto grad_input_values_ti = getTensorInfo<scalar_t, int64_t>(grad_input_values);

        _sparse_sum_backward_cuda_kernel<scalar_t><<<grid, block, 0, stream>>>(
          total_threads,
          grad_indices_ti,
          input_indices_ti,
          input_indices_pos_ti,
          grad_values_expand_ti,
          grad_input_values_ti
        );
      });
    }

    return at::_sparse_coo_tensor_with_dims_and_tensors(input_sparse_dim, input_dense_dim, input_sizes, input_indices.clone(at::MemoryFormat::Contiguous), grad_input_values, grad.options());
  }
}

}} // namespace at::native
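
// Usage sketch (illustrative only; the tensor shapes and dims below are assumptions):
// _sparse_sum_backward_cuda is reached when a sparse sum over explicit dims takes
// part in autograd, roughly as in
//   auto x = at::rand({8, 8}, at::kCUDA).to_sparse().requires_grad_(true);
//   auto y = at::_sparse_sum(x, /*dim=*/{0, 1});  // all sparse dims summed -> dense result
//   y.backward(at::ones_like(y));                 // gradient flows back through the code above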