Dataset schema (four string columns per row; value lengths as reported):
  hip_filename   string, 5 to 84 characters
  hip_content    string, 79 to 9.69M characters
  cuda_filename  string, 4 to 83 characters
  cuda_content   string, 19 to 9.69M characters
50ca423c8cb6b0a6a88832e126c616ad537d0724.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/hip/block_reduce.cuh> #include <ATen/native/Resize.h> #include <ATen/native/IndexingUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/_conv_depthwise2d_native.h> #endif namespace at { namespace native { namespace { using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; template <typename scalar_t, int ndim, template <typename U> class PtrTraits = DefaultPtrTraits> PackedTensorAccessor32<scalar_t, ndim, PtrTraits> dummy_packed_accessor32() { std::array<int64_t, ndim> zeros{}; return {nullptr, zeros.data(), zeros.data()}; } template <int kSize, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_forward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, const PackedTensorAccessor32<scalar_t, 1, DefaultPtrTraits> bias, bool biasEnabled, index_t totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; acc_t value = biasEnabled ? 
static_cast<acc_t>(bias.data()[c]) : acc_t(0); const index_t offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #if !defined(USE_ROCM) #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const index_t offset = offset0 + h_in * inputWidth + w_in; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(input.data()[offset])); } ++weightOffset; } } output.data()[linearIndex] = static_cast<scalar_t>(value); } } template <int kSize, int stride, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_backward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_input, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, index_t totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; const int strideW = (stride != 0) ? stride : strideWidth; const int strideH = (stride != 0) ? stride : strideHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; acc_t value(0); #if !defined(USE_ROCM) #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #if defined(USE_ROCM) #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(grad_output.data()[offset])); } } ++weightOffset; } } } grad_input.data()[linearIndex] = static_cast<scalar_t>(value); } } template <typename scalar_t, typename index_t=unsigned> __global__ void conv_depthwise2d_grad_weight_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_weight, const int batchSize, const int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const 
int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int channelStride = kernelWidth * kernelHeight; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; acc_t grad(0); const int laneId = threadIdx.x % C10_WARP_SIZE; const int batch = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (index_t idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) { // Need to calculate the following: batch position, and offset into the grad_output // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad += (static_cast<acc_t>(input.data()[inputOffset]) * static_cast<acc_t>(grad_output.data()[outputOffset])); } } } // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value extern __shared__ char smem[]; acc_t* buf = reinterpret_cast<acc_t*>(smem); acc_t tval = cuda_utils::BlockReduceSum(grad, buf); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to grad_weight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); grad_weight.data()[weightOffset] = static_cast<scalar_t>(tval); } } void conv_depthwise2d_forward_out( const Tensor &input, const Tensor &output, const Tensor &weight, const Tensor &bias, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(output.is_contiguous()); auto in_sizes = input.sizes(); auto w_sizes = weight.sizes(); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) TORCH_CHECK(w_sizes[1] == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels TORCH_CHECK(w_sizes[0] % in_sizes[1] == 0); // Bias has same # of channels as output const bool has_bias = bias.defined(); TORCH_CHECK(!has_bias || (bias.dim() <= 1 && bias.numel() == w_sizes[0])); // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int64_t height = in_sizes[2]; int64_t width = in_sizes[3]; int64_t outputChannels = w_sizes[0]; auto out_sizes = conv_output_size(in_sizes, weight.sizes(), {padH, padW}, {dH, dW}, {dilationH, dilationW}); const auto outputWidth = out_sizes[3]; const auto outputHeight = out_sizes[2]; resize_output(output, out_sizes); int64_t inputChannels = in_sizes[1]; int64_t depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(output)); int32_t n = output.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "conv_depthwise2d_forward_cuda", [&] { // Create PackedTensorAccessor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto weight_a = weight.packed_accessor32<scalar_t, 4>(); const auto output_a = output.packed_accessor32<scalar_t, 4>(); const auto 
bias_a = has_bias ? bias.packed_accessor32<scalar_t, 1>() : dummy_packed_accessor32<scalar_t, 1>(); if (kW == 3 && kH == 3) { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<3>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (kW == 1 && kH == 1) { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<1>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_forward_kernel<0>) , dim3(grid), dim3(block), 0, stream, input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } void conv_depthwise2d_backward_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking, as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); // Same # of filters as outputChannels TORCH_CHECK(weight.sizes()[0] == grad_output.sizes()[1]); // Resize Grainput_a auto in_sizes = input.sizes(); resize_output(grad_input, in_sizes); int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_input.is_contiguous()); // One thread per grainput_a value TORCH_CHECK(canUse32BitIndexMath(grad_input) && canUse32BitIndexMath(grad_output)); int32_t n = grad_input.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_backward_cuda", [&] { auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); auto grad_input_a = grad_input.packed_accessor32<scalar_t, 4>(); auto weight_a = weight.packed_accessor32<scalar_t, 4>(); if (kW == 3 && kH == 3) { if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, 
outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<3, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else if (kW == 1 && kH == 1) { if (dW == 1 && dH == 1){ hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<1, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } } else if (dW == 1 && dH == 1) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 1>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 2>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( conv_depthwise2d_backward_kernel<0, 0>), dim3(grid), dim3(block), 0, stream, grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. 
int getGradParamsNumThreads(int batchSize) { //warp per item in a batch, up to a maximum constexpr int MAX_BLOCK_SIZE = 256; return ::min(batchSize * at::cuda::warp_size(), MAX_BLOCK_SIZE); } void conv_depthwise2d_grad_weight_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); auto in_sizes = input.sizes(); int batchSize = in_sizes[0]; int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; resize_output(grad_weight, {outputChannels, 1, kH, kW}); // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(grad_weight.is_contiguous()); // We parallelize so that each block computes a single value in grad_weight TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(grad_output)); int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); const auto stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_grad_weight_cuda", [&] { const auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto grad_weight_a = grad_weight.packed_accessor32<scalar_t, 4>(); using acc_t = at::acc_type<scalar_t, true>; int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(block.x % warp_size == 0); int smem = (block.x / warp_size) * sizeof(acc_t); hipLaunchKernelGGL(( conv_depthwise2d_grad_weight_kernel), dim3(grid), dim3(block), smem, stream, grad_output_a, input_a, grad_weight_a, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } // namespace (anonymous) const Tensor& conv_depthwise2d_cuda_out( const Tensor &input_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor &out) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); TORCH_CHECK(dilation.size() == 2); auto input = input_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_opt.has_value() && bias_opt->defined()) { return bias_opt->expect_contiguous(); } return c10::MaybeOwned<Tensor>::owned(c10::in_place); }(); conv_depthwise2d_forward_out( *input, out, *weight, *bias, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); return out; } Tensor conv_depthwise2d_cuda( const Tensor &input, const Tensor &weight, IntArrayRef kernel_size, const 
c10::optional<Tensor> &bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) { auto out = at::empty({0}, input.options()); return conv_depthwise2d_cuda_out(input, weight, kernel_size, bias, stride, padding, dilation, out); } std::tuple<Tensor&, Tensor&> conv_depthwise2d_backward_cuda_out( const Tensor & grad_output_, const Tensor & self_, const Tensor & weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight) { auto grad_output = grad_output_.expect_contiguous(); if (grad_weight.defined()) { auto self = self_.expect_contiguous(); conv_depthwise2d_grad_weight_out( *self, *grad_output, grad_weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } if (grad_input.defined()) { auto weight = weight_.expect_contiguous(); conv_depthwise2d_backward_out( self_, *grad_output, grad_input, *weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } return std::forward_as_tuple(grad_input, grad_weight); } std::tuple<Tensor, Tensor> conv_depthwise2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool, 2> output_mask) { Tensor grad_input; Tensor grad_weight; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } return conv_depthwise2d_backward_cuda_out( grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight); } REGISTER_CUDA_DISPATCH(conv_depthwise2d_backward_stub, &conv_depthwise2d_backward_cuda); } // namespace native } // namespace at
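A minimal host-side sketch (standalone and assumed, not a dataset record) of the flat-index decomposition that the forward kernel's comments describe: n, c, h and w are peeled off a linear output index with one division and one multiply-subtract per dimension, which matches the commented-out modulo formulation.

#include <cassert>

int main() {
  const int outputChannels = 8, outputHeight = 5, outputWidth = 7;
  const int total = 2 * outputChannels * outputHeight * outputWidth;  // two batch items
  for (int linearIndex = 0; linearIndex < total; ++linearIndex) {
    int indtmp1 = linearIndex / outputWidth;
    const int w = linearIndex - indtmp1 * outputWidth;
    int indtmp2 = indtmp1 / outputHeight;
    const int h = indtmp1 - indtmp2 * outputHeight;
    indtmp1 = indtmp2;
    indtmp2 = indtmp1 / outputChannels;
    const int c = indtmp1 - indtmp2 * outputChannels;
    const int n = indtmp2;
    // Reference form from the kernel's commented-out code:
    assert(w == linearIndex % outputWidth);
    assert(h == (linearIndex / outputWidth) % outputHeight);
    assert(c == (linearIndex / (outputWidth * outputHeight)) % outputChannels);
    assert(n == linearIndex / (outputWidth * outputHeight * outputChannels));
  }
  return 0;
}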
50ca423c8cb6b0a6a88832e126c616ad537d0724.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/Dispatch.h> #include <ATen/AccumulateType.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/cuda/block_reduce.cuh> #include <ATen/native/Resize.h> #include <ATen/native/IndexingUtils.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/_conv_depthwise2d_native.h> #endif namespace at { namespace native { namespace { using at::cuda::detail::CUDA_NUM_THREADS; using at::cuda::detail::GET_BLOCKS; template <typename scalar_t, int ndim, template <typename U> class PtrTraits = DefaultPtrTraits> PackedTensorAccessor32<scalar_t, ndim, PtrTraits> dummy_packed_accessor32() { std::array<int64_t, ndim> zeros{}; return {nullptr, zeros.data(), zeros.data()}; } template <int kSize, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_forward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, const PackedTensorAccessor32<scalar_t, 1, DefaultPtrTraits> bias, bool biasEnabled, index_t totalElements, const int outputChannels, const int depthwiseMultiplier, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { //calculate n,c,h,w indices, replacing modulos by divide and multiply add, //result is same as would be in the code below //const int n = linearIndex / batchStride; //batchStride = outputChannels * outputHeight * outputWidth //const int c = (linearIndex / channelStride) % outputChannels; //channelStride = outputHeight * outputWidth //const int h = (linearIndex / outputWidth) % outputHeight; //const int w = linearIndex % outputWidth; int indtmp1 = linearIndex/outputWidth; const int w = linearIndex - indtmp1 * outputWidth; int indtmp2 = indtmp1/outputHeight; const int h = indtmp1 - indtmp2 * outputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/outputChannels; const int c = indtmp1 - indtmp2 * outputChannels; const int n = indtmp2; int inputChannel = c; int inputChannels = outputChannels; if (depthwiseMultiplier !=1) { inputChannel /= depthwiseMultiplier; inputChannels /= depthwiseMultiplier; } int weightOffset = c * kernelHeight * kernelWidth; acc_t value = biasEnabled ? 
static_cast<acc_t>(bias.data()[c]) : acc_t(0); const index_t offset0 = (n * inputChannels + inputChannel) * inputHeight * inputWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kH = 0; kH < KH_LIMIT; ++kH) { #if !defined(USE_ROCM) #pragma unroll #endif for (int kW = 0; kW < KW_LIMIT; ++kW) { const int h_in = -padHeight + h * strideHeight + kH * dilationHeight; const int w_in = -padWidth + w * strideWidth + kW * dilationWidth; if ((h_in >= 0) && (h_in < inputHeight) && (w_in >= 0) && (w_in < inputWidth)) { const index_t offset = offset0 + h_in * inputWidth + w_in; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(input.data()[offset])); } ++weightOffset; } } output.data()[linearIndex] = static_cast<scalar_t>(value); } } template <int kSize, int stride, typename scalar_t, typename index_t> __global__ void conv_depthwise2d_backward_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_input, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> weight, index_t totalElements, const int inputChannels, const int depthwiseMultiplier, const int outputChannels, const int inputWidth, const int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int KW_LIMIT = (kSize != 0) ? kSize : kernelWidth; const int KH_LIMIT = (kSize != 0) ? kSize : kernelHeight; const int strideW = (stride != 0) ? stride : strideWidth; const int strideH = (stride != 0) ? stride : strideHeight; CUDA_KERNEL_LOOP_TYPE(linearIndex, totalElements, index_t) { int indtmp1 = linearIndex/inputWidth; const int w = linearIndex - indtmp1 * inputWidth; int indtmp2 = indtmp1/inputHeight; const int h = indtmp1 - indtmp2 * inputHeight; indtmp1 = indtmp2; indtmp2 = indtmp1/inputChannels; const int c = indtmp1 - indtmp2 * inputChannels; const int n = indtmp2; acc_t value(0); #if !defined(USE_ROCM) #pragma unroll #endif for (int multiplier = 0; multiplier < depthwiseMultiplier; ++multiplier) { int och = (c * depthwiseMultiplier) + multiplier; int weightOffset = och * kernelHeight * kernelWidth; #if !defined(USE_ROCM) #pragma unroll #endif for (int kh = 0; kh < KH_LIMIT; ++kh) { #if defined(USE_ROCM) #pragma unroll #endif for (int kw = 0; kw < KW_LIMIT; ++kw) { int h_out = h + padHeight - kh * dilationHeight; int w_out = w + padWidth - kw * dilationWidth; if ((h_out % strideH == 0) && (w_out % strideW == 0)) { h_out = h_out / strideH; w_out = w_out / strideW; if ((h_out >= 0) && (h_out < outputHeight) && (w_out >= 0) && (w_out < outputWidth)) { const int offset = ((n * outputChannels + och) * outputHeight + h_out) * outputWidth + w_out; value += (static_cast<acc_t>(weight.data()[weightOffset]) * static_cast<acc_t>(grad_output.data()[offset])); } } ++weightOffset; } } } grad_input.data()[linearIndex] = static_cast<scalar_t>(value); } } template <typename scalar_t, typename index_t=unsigned> __global__ void conv_depthwise2d_grad_weight_kernel( const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_output, const PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> input, PackedTensorAccessor32<scalar_t, 4, DefaultPtrTraits> grad_weight, const int batchSize, const int inputChannels, const int kernelChannels, const int depthwiseMultiplier, const int inputWidth, const 
int inputHeight, const int outputWidth, const int outputHeight, const int kernelWidth, const int kernelHeight, const int strideWidth, const int strideHeight, const int padWidth, const int padHeight, const int dilationWidth, const int dilationHeight) { using acc_t = at::acc_type<scalar_t, true>; const int channelStride = kernelWidth * kernelHeight; // Each Block is responsible for accumulating over a permutation of // (channels x kH x kW), use blockIdx to determine which one int bidx = blockIdx.x; int kW = bidx % kernelWidth; int kH = (bidx / kernelWidth) % kernelHeight; int ch = (bidx / channelStride); // Need to calculate which input channel is associated with this filter // channel int inputCh = ch / depthwiseMultiplier; acc_t grad(0); const int laneId = threadIdx.x % C10_WARP_SIZE; const int batch = threadIdx.x / C10_WARP_SIZE; const int nwarps = blockDim.x / C10_WARP_SIZE; const int imageElements = outputWidth * outputHeight; // Use warp per item. In the original kernel, a threadblock was used to sum over NHW. // Here, we use a warp to sum values over HW dimension, and if batchSize is larger than the // number of warps, a warp would loop over remaining batch items (e.g. if there are 8 warps, // warp 0 would go over 0-8-16 etc image, warp 1 over 1-9-17 etc). Later in blockReduce, // all the warps will be reduced anyway, thus the full reduction will be over NHW, like it // should be. That allows to get rid of one modulo operation inside the loop (because n/batchIdx // now does not have to be computed through modulo, you are just looping over it), and // bring a nice speed-up. for (int batchIdx = batch; batchIdx < batchSize; batchIdx += nwarps){ // Warp-stride loop over elements in a batch item for (index_t idx = laneId; idx < imageElements; idx += C10_WARP_SIZE) { // Need to calculate the following: batch position, and offset into the grad_output // in height, and width. 
We can intuit the corresponding position in the input from // the other parameters we have int go_w_offset = idx % outputWidth; int go_h_offset = (idx / outputWidth); int i_w_offset = (go_w_offset * strideWidth) + (kW * dilationWidth) - padWidth; int i_h_offset = (go_h_offset * strideHeight) + (kH * dilationHeight) - padHeight; if (i_w_offset >= 0 && i_h_offset >= 0 && i_w_offset < inputWidth && i_h_offset < inputHeight) { int inputOffset = ((batchIdx * inputChannels + inputCh) * inputHeight + i_h_offset) * inputWidth + i_w_offset; int outputOffset = ((batchIdx * kernelChannels + ch) * outputHeight ) * outputWidth + idx; grad += (static_cast<acc_t>(input.data()[inputOffset]) * static_cast<acc_t>(grad_output.data()[outputOffset])); } } } // At this point each thread in the block has a local gradient, which we need to // accumulate prior to writing the global value extern __shared__ char smem[]; acc_t* buf = reinterpret_cast<acc_t*>(smem); acc_t tval = cuda_utils::BlockReduceSum(grad, buf); // After reduction, first thread in the block has the gradient, so its responsible // for writing it to grad_weight if (threadIdx.x == 0) { int weightOffset = kW + (kernelWidth * kH) + (kernelWidth * kernelHeight * ch); grad_weight.data()[weightOffset] = static_cast<scalar_t>(tval); } } void conv_depthwise2d_forward_out( const Tensor &input, const Tensor &output, const Tensor &weight, const Tensor &bias, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(output.is_contiguous()); auto in_sizes = input.sizes(); auto w_sizes = weight.sizes(); // We assume that the input and weight Tensors are shaped properly by // the caller, so we verify that here to some extent // Weight Tensor is shape (output_channels, 1, kH, kW) TORCH_CHECK(w_sizes[1] == 1); // Input Tensor is shape (N, input_channels, H, W) // We verify that the # of output_channels is a multiple of input_channels TORCH_CHECK(w_sizes[0] % in_sizes[1] == 0); // Bias has same # of channels as output const bool has_bias = bias.defined(); TORCH_CHECK(!has_bias || (bias.dim() <= 1 && bias.numel() == w_sizes[0])); // Following the behavior of other THCUNN functions, we shape the output // Tensor ourselves int64_t height = in_sizes[2]; int64_t width = in_sizes[3]; int64_t outputChannels = w_sizes[0]; auto out_sizes = conv_output_size(in_sizes, weight.sizes(), {padH, padW}, {dH, dW}, {dilationH, dilationW}); const auto outputWidth = out_sizes[3]; const auto outputHeight = out_sizes[2]; resize_output(output, out_sizes); int64_t inputChannels = in_sizes[1]; int64_t depthwiseMultiplier = outputChannels / inputChannels; // One thread per output value TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(output)); int32_t n = output.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "conv_depthwise2d_forward_cuda", [&] { // Create PackedTensorAccessor // Kernel currently relies upon all the Tensors to be contiguous, but we made // them contiguous above const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto weight_a = weight.packed_accessor32<scalar_t, 4>(); const auto output_a = output.packed_accessor32<scalar_t, 4>(); const auto bias_a = 
has_bias ? bias.packed_accessor32<scalar_t, 1>() : dummy_packed_accessor32<scalar_t, 1>(); if (kW == 3 && kH == 3) { conv_depthwise2d_forward_kernel<3> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (kW == 1 && kH == 1) { conv_depthwise2d_forward_kernel<1> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_forward_kernel<0> <<<grid, block, 0, stream>>>( input_a, output_a, weight_a, bias_a, has_bias, n, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } void conv_depthwise2d_backward_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(weight.numel() > 0 && weight.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking, as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); // Same # of filters as outputChannels TORCH_CHECK(weight.sizes()[0] == grad_output.sizes()[1]); // Resize Grainput_a auto in_sizes = input.sizes(); resize_output(grad_input, in_sizes); int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(weight.is_contiguous()); TORCH_CHECK(grad_input.is_contiguous()); // One thread per grainput_a value TORCH_CHECK(canUse32BitIndexMath(grad_input) && canUse32BitIndexMath(grad_output)); int32_t n = grad_input.numel(); int blocks = GET_BLOCKS(n); dim3 grid(blocks); dim3 block(CUDA_NUM_THREADS); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_backward_cuda", [&] { auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); auto grad_input_a = grad_input.packed_accessor32<scalar_t, 4>(); auto weight_a = weight.packed_accessor32<scalar_t, 4>(); if (kW == 3 && kH == 3) { if (dW == 1 && dH == 1){ conv_depthwise2d_backward_kernel<3, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<3, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<3, 0><<<grid, block, 
0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else if (kW == 1 && kH == 1) { if (dW == 1 && dH == 1){ conv_depthwise2d_backward_kernel<1, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<1, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<1, 0><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } else if (dW == 1 && dH == 1) { conv_depthwise2d_backward_kernel<0, 1><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else if (dW == 2 && dH == 2) { conv_depthwise2d_backward_kernel<0, 2><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { conv_depthwise2d_backward_kernel<0, 0><<<grid, block, 0, stream>>>( grad_output_a, grad_input_a, weight_a, n, inputChannels, depthwiseMultiplier, outputChannels, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } // Crude benchmarks suggest 256 is better than 512 and 1024 // TODO: Autotune/use better heuristics, improve speed more. 
int getGradParamsNumThreads(int batchSize) { //warp per item in a batch, up to a maximum constexpr int MAX_BLOCK_SIZE = 256; return std::min(batchSize * at::cuda::warp_size(), MAX_BLOCK_SIZE); } void conv_depthwise2d_grad_weight_out( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight, const int kW, const int kH, const int dW, const int dH, const int padW, const int padH, const int dilationW, const int dilationH) { // Only handle 4D Input Tensors for now TORCH_CHECK(input.numel() > 0 && input.dim() == 4); TORCH_CHECK(grad_output.numel() > 0 && grad_output.dim() == 4); // Minimal shape checking as above // Same # of elements in batch TORCH_CHECK(input.sizes()[0] == grad_output.sizes()[0]); auto in_sizes = input.sizes(); int batchSize = in_sizes[0]; int inputChannels = in_sizes[1]; int height = in_sizes[2]; int width = in_sizes[3]; auto gO_sizes = grad_output.sizes(); int outputChannels = gO_sizes[1]; int outputHeight = gO_sizes[2]; int outputWidth = gO_sizes[3]; int depthwiseMultiplier = outputChannels / inputChannels; resize_output(grad_weight, {outputChannels, 1, kH, kW}); // Kernel currently relies upon all the Tensors to be contiguous TORCH_CHECK(grad_output.is_contiguous()); TORCH_CHECK(input.is_contiguous()); TORCH_CHECK(grad_weight.is_contiguous()); // We parallelize so that each block computes a single value in grad_weight TORCH_CHECK(canUse32BitIndexMath(input) && canUse32BitIndexMath(grad_output)); int blocks = outputChannels * kH * kW; // Make sure we have enough threads to perform the reduction, and use this number // to create the shared memory size for the reduction dim3 grid(blocks); dim3 block(getGradParamsNumThreads(batchSize)); const auto stream = c10::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, grad_output.scalar_type(), "conv_depthwise2d_grad_weight_cuda", [&] { const auto grad_output_a = grad_output.packed_accessor32<scalar_t, 4>(); const auto input_a = input.packed_accessor32<scalar_t, 4>(); const auto grad_weight_a = grad_weight.packed_accessor32<scalar_t, 4>(); using acc_t = at::acc_type<scalar_t, true>; int warp_size = at::cuda::warp_size(); TORCH_INTERNAL_ASSERT(block.x % warp_size == 0); int smem = (block.x / warp_size) * sizeof(acc_t); conv_depthwise2d_grad_weight_kernel<<<grid, block, smem, stream>>>( grad_output_a, input_a, grad_weight_a, batchSize, inputChannels, outputChannels, depthwiseMultiplier, width, height, outputWidth, outputHeight, kW, kH, dW, dH, padW, padH, dilationW, dilationH); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } // namespace (anonymous) const Tensor& conv_depthwise2d_cuda_out( const Tensor &input_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_opt, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, const Tensor &out) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); TORCH_CHECK(dilation.size() == 2); auto input = input_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_opt.has_value() && bias_opt->defined()) { return bias_opt->expect_contiguous(); } return c10::MaybeOwned<Tensor>::owned(c10::in_place); }(); conv_depthwise2d_forward_out( *input, out, *weight, *bias, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); return out; } Tensor conv_depthwise2d_cuda( const Tensor &input, const Tensor &weight, IntArrayRef kernel_size, const c10::optional<Tensor> &bias, IntArrayRef stride, 
IntArrayRef padding, IntArrayRef dilation) { auto out = at::empty({0}, input.options()); return conv_depthwise2d_cuda_out(input, weight, kernel_size, bias, stride, padding, dilation, out); } std::tuple<Tensor&, Tensor&> conv_depthwise2d_backward_cuda_out( const Tensor & grad_output_, const Tensor & self_, const Tensor & weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, Tensor & grad_input, Tensor & grad_weight) { auto grad_output = grad_output_.expect_contiguous(); if (grad_weight.defined()) { auto self = self_.expect_contiguous(); conv_depthwise2d_grad_weight_out( *self, *grad_output, grad_weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } if (grad_input.defined()) { auto weight = weight_.expect_contiguous(); conv_depthwise2d_backward_out( self_, *grad_output, grad_input, *weight, kernel_size[1], kernel_size[0], stride[1], stride[0], padding[1], padding[0], dilation[1], dilation[0]); } return std::forward_as_tuple(grad_input, grad_weight); } std::tuple<Tensor, Tensor> conv_depthwise2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool, 2> output_mask) { Tensor grad_input; Tensor grad_weight; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } return conv_depthwise2d_backward_cuda_out( grad_output, self, weight, kernel_size, stride, padding, dilation, grad_input, grad_weight); } REGISTER_CUDA_DISPATCH(conv_depthwise2d_backward_stub, &conv_depthwise2d_backward_cuda); } // namespace native } // namespace at
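The pair above differs mainly in the included headers, the stream accessor (c10::cuda::getCurrentCUDAStream() versus c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()), the launch-check macro (C10_CUDA_KERNEL_LAUNCH_CHECK versus C10_HIP_KERNEL_LAUNCH_CHECK), and the kernel-launch syntax: the .cu side uses kernel<<<grid, block, smem, stream>>>(args) while the hipified side uses hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), smem, stream, args). A minimal standalone HIP sketch of that launch rewrite, using a toy kernel that is not part of the dataset:

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= factor;
}

int main() {
  const int n = 1 << 10;
  float* d = nullptr;
  hipMalloc((void**)&d, n * sizeof(float));
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  hipStream_t stream = nullptr;  // default stream

  // Hipified launch form, matching the hipLaunchKernelGGL calls in the .hip file above.
  hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d, 2.0f, n);
  // The CUDA original would read: scale_kernel<<<grid, block, 0, stream>>>(d, 2.0f, n);

  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}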
88c9e23c9d4ebc2c6cc567fc0149f733015cba88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Negate.h" #include "Negate.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* set each entry to its negtive value (CUDA Kernel) >> a - pointer to the input data array >> b - pointer to the output data array >> size - size of the data array */ __global__ void KernelNegate(DTYPE * a, DTYPE * b, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) b[i] = -a[i]; } /* set each entry to its negtive value (CUDA Kernel) This is for float16 computation >> a - pointer to the input data array >> b - pointer to the output data array >> size - size of the data array */ __global__ void KernelNegate(__half * a, __half * b, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) if (i < size) b[i] = __hsub(__float2half(0), a[i]); #else if (i < size) b[i] = __float2half(-__half2float(a[i])); #endif } /* set each entry to its negtive value >> a - input tensor >> b - output tensor */ void _CudaNegate(const XTensor * a, XTensor * b) { CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!"); CheckNTErrors((a->isSparse == false), "TODO!"); int gridSize[3]; int blockSize[3]; GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); dim3 blocks(gridSize[0]); dim3 threads(blockSize[0]); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (a->dataType == DEFAULT_DTYPE) { KernelNegate << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum); } else if (a->dataType == X_FLOAT16) { KernelNegate << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum); } else { ShowNTErrors("TODO!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
88c9e23c9d4ebc2c6cc567fc0149f733015cba88.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northestern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Negate.h" #include "Negate.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* set each entry to its negtive value (CUDA Kernel) >> a - pointer to the input data array >> b - pointer to the output data array >> size - size of the data array */ __global__ void KernelNegate(DTYPE * a, DTYPE * b, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < size) b[i] = -a[i]; } /* set each entry to its negtive value (CUDA Kernel) This is for float16 computation >> a - pointer to the input data array >> b - pointer to the output data array >> size - size of the data array */ __global__ void KernelNegate(__half * a, __half * b, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x; #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) if (i < size) b[i] = __hsub(__float2half(0), a[i]); #else if (i < size) b[i] = __float2half(-__half2float(a[i])); #endif } /* set each entry to its negtive value >> a - input tensor >> b - output tensor */ void _CudaNegate(const XTensor * a, XTensor * b) { CheckNTErrors((XTensor::IsSameShaped(a, b)), "Input tensors should have the same type!"); CheckNTErrors((a->isSparse == false), "TODO!"); int gridSize[3]; int blockSize[3]; GDevs.GetCudaThread(a->devID, a->unitNum, gridSize, blockSize); dim3 blocks(gridSize[0]); dim3 threads(blockSize[0]); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (a->dataType == DEFAULT_DTYPE) { KernelNegate << <blocks, threads >> >((DTYPE*)a->data, (DTYPE*)b->data, a->unitNum); } else if (a->dataType == X_FLOAT16) { KernelNegate << <blocks, threads >> >((__half*)a->data, (__half*)b->data, a->unitNum); } else { ShowNTErrors("TODO!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
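The Negate pair differs only in the hipify banner, the added hip/hip_runtime.h include, and the platform guard (#ifdef USE_CUDA becomes #ifdef USE_ROCM); the kernels, including their spaced "<< <blocks, threads >> >" launches, are carried over verbatim. A minimal standalone HIP sketch of the guarded half-precision pattern used by KernelNegate above (toy kernel name, assumed; not a dataset record):

#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>

__global__ void NegateHalf(__half* a, __half* b, int size) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i >= size) return;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
  // Native half arithmetic where supported (and on ROCm, where __CUDA_ARCH__ is not defined).
  b[i] = __hsub(__float2half(0.0f), a[i]);
#else
  // Fallback for older CUDA architectures: round-trip through float.
  b[i] = __float2half(-__half2float(a[i]));
#endif
}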
ff9a4e0d8bc3bf380276672abd17165b41e91a0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include"matrix.h" #include<cstdlib> #include<cmath> #include"layer.h" #include<iostream> // Matrix multiplication kernel called by MatMul() __device__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < A.height && col < B.width) { for(int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } } __device__ void AddSigmoidKernel(Matrix A, Matrix B, Matrix C) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < A.height && col < A.width) { C.elements[row * C.width + col] = A.elements[row * A.width + col]\ + B.elements[row * B.width + col]; C.elements[row * C.width + col] = \ 1.0 / (1.0 + ::exp(-C.elements[row * C.width + col])); } } __global__ void forwardKernel(Matrix input, Matrix weight, Matrix bias, \ Matrix output) { MatMulKernel(input, weight, output); __syncthreads(); AddSigmoidKernel(output, bias, output); __syncthreads(); } extern "C" int deviceQuery() { // By default, we use device 0, otherwise //we override the device ID based on what is provided at the command line int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { //printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; return block_size; } extern "C" void forwardGpu(Matrix input, Net* n, Matrix output) { //std::cout<<"in forwardGPU...\n"; hipError_t err; // copy the net parameter // compute dimGrid, dimBlock int block_size = deviceQuery(); dim3 dimBlock(block_size, block_size); //std::cout<<block_size<<" "; dim3 dimGrid(4096 / dimBlock.x, 4096 / dimBlock.y); //device memory.... 
Matrix h_input; h_input.width = 1; h_input.height = input.height; h_input.elements = (float*)malloc(input.height*sizeof(float)); for(int i = 0; i < input.height; ++i) h_input.elements[i] = input.elements[i]; for(int i = 0; i < n->numLayer; ++i) { // allocate bias int outDim = n->layerOutputDim[i]; Matrix d_bias; d_bias.width = 1; d_bias.height = outDim; d_bias.elements = NULL; size_t size = d_bias.height * sizeof(float); err = hipMalloc((void**)&d_bias.elements, size); if(err != hipSuccess) { std::cout<<"Malloc failed...\n"; } hipMemcpy(d_bias.elements, n->layers[i].bias.elements, size,\ hipMemcpyHostToDevice); // allocate weights Matrix d_weight; d_weight.width = n->layerOutputDim[i]; d_weight.height = n->layerInputDim[i]; // copy weight parameters size = d_weight.width * d_weight.height * sizeof(float); err = hipMalloc(&d_weight.elements, size); if(err !=hipSuccess) std::cout<<"Malloc failed...\n"; hipMemcpy(d_weight.elements, n->layers[i].weight.elements, \ size, hipMemcpyHostToDevice); // allocate output Matrix d_outTemp; d_outTemp.width = 1; d_outTemp.height = n->layerOutputDim[i]; size = n->layerOutputDim[i] * sizeof(float); err = hipMalloc(&d_outTemp.elements, size); if(err != hipSuccess) std::cout<<"Malloc d_outtemp failed\n"; // allocate inputs Matrix d_input; d_input.height = n->layerInputDim[i]; d_input.width = 1; size = d_input.height * sizeof(float); err = hipMalloc(&d_input.elements, size); if(err != hipSuccess) std::cout<<"cudamalloc d_input failed"; hipMemcpy(d_input.elements, h_input.elements, \ size, hipMemcpyHostToDevice); // Invoke kernel calls hipLaunchKernelGGL(( forwardKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_input, d_weight, d_bias,\ d_outTemp); //std::cout<<"after kernel invoked\n"; // copy current output; Matrix h_output; h_output.width = 1; h_output.height = n->layerOutputDim[i]; h_output.elements = new float[h_output.height]; size = h_output.height * sizeof(float); hipMemcpy(h_output.elements, d_outTemp.elements, size, hipMemcpyDeviceToHost); // copy h_output to h_input; delete h_input.elements; h_input.width = 1; h_input.height = h_output.height; h_input.elements = new float[h_output.height]; for(int j = 0; j < h_output.height; ++j) h_input.elements[i] = h_output.elements[i]; // free memory.. if(i == n->numLayer -1 ) { for(int k = 0; k < h_output.height; ++k) output.elements[k] = h_output.elements[k]; } delete h_output.elements; hipFree(d_input.elements); hipFree(d_outTemp.elements); hipFree(d_bias.elements); hipFree(d_weight.elements); } // copy output to return }
ff9a4e0d8bc3bf380276672abd17165b41e91a0c.cu
#include<cuda_runtime.h> #include"matrix.h" #include<cstdlib> #include<cmath> #include"layer.h" #include<iostream> // Matrix multiplication kernel called by MatMul() __device__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Each thread computes one element of C // by accumulating results into Cvalue float Cvalue = 0; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < A.height && col < B.width) { for(int e = 0; e < A.width; ++e) Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col]; C.elements[row * C.width + col] = Cvalue; } } __device__ void AddSigmoidKernel(Matrix A, Matrix B, Matrix C) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; if(row < A.height && col < A.width) { C.elements[row * C.width + col] = A.elements[row * A.width + col]\ + B.elements[row * B.width + col]; C.elements[row * C.width + col] = \ 1.0 / (1.0 + std::exp(-C.elements[row * C.width + col])); } } __global__ void forwardKernel(Matrix input, Matrix weight, Matrix bias, \ Matrix output) { MatMulKernel(input, weight, output); __syncthreads(); AddSigmoidKernel(output, bias, output); __syncthreads(); } extern "C" int deviceQuery() { // By default, we use device 0, otherwise //we override the device ID based on what is provided at the command line int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } else { //printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; return block_size; } extern "C" void forwardGpu(Matrix input, Net* n, Matrix output) { //std::cout<<"in forwardGPU...\n"; cudaError_t err; // copy the net parameter // compute dimGrid, dimBlock int block_size = deviceQuery(); dim3 dimBlock(block_size, block_size); //std::cout<<block_size<<" "; dim3 dimGrid(4096 / dimBlock.x, 4096 / dimBlock.y); //device memory.... 
Matrix h_input; h_input.width = 1; h_input.height = input.height; h_input.elements = new float[input.height]; for(int i = 0; i < input.height; ++i) h_input.elements[i] = input.elements[i]; for(int i = 0; i < n->numLayer; ++i) { // allocate bias int outDim = n->layerOutputDim[i]; Matrix d_bias; d_bias.width = 1; d_bias.height = outDim; d_bias.elements = NULL; size_t size = d_bias.height * sizeof(float); err = cudaMalloc((void**)&d_bias.elements, size); if(err != cudaSuccess) { std::cout<<"Malloc failed...\n"; } cudaMemcpy(d_bias.elements, n->layers[i].bias.elements, size,\ cudaMemcpyHostToDevice); // allocate weights Matrix d_weight; d_weight.width = n->layerOutputDim[i]; d_weight.height = n->layerInputDim[i]; // copy weight parameters size = d_weight.width * d_weight.height * sizeof(float); err = cudaMalloc(&d_weight.elements, size); if(err !=cudaSuccess) std::cout<<"Malloc failed...\n"; cudaMemcpy(d_weight.elements, n->layers[i].weight.elements, \ size, cudaMemcpyHostToDevice); // allocate output Matrix d_outTemp; d_outTemp.width = 1; d_outTemp.height = n->layerOutputDim[i]; size = n->layerOutputDim[i] * sizeof(float); err = cudaMalloc(&d_outTemp.elements, size); if(err != cudaSuccess) std::cout<<"Malloc d_outtemp failed\n"; // allocate inputs Matrix d_input; d_input.height = n->layerInputDim[i]; d_input.width = 1; size = d_input.height * sizeof(float); err = cudaMalloc(&d_input.elements, size); if(err != cudaSuccess) std::cout<<"cudamalloc d_input failed"; cudaMemcpy(d_input.elements, h_input.elements, \ size, cudaMemcpyHostToDevice); // Invoke kernel calls forwardKernel<<<dimGrid, dimBlock>>>(d_input, d_weight, d_bias,\ d_outTemp); //std::cout<<"after kernel invoked\n"; // copy current output; Matrix h_output; h_output.width = 1; h_output.height = n->layerOutputDim[i]; h_output.elements = new float[h_output.height]; size = h_output.height * sizeof(float); cudaMemcpy(h_output.elements, d_outTemp.elements, size, cudaMemcpyDeviceToHost); // copy h_output to h_input; delete[] h_input.elements; h_input.width = 1; h_input.height = h_output.height; h_input.elements = new float[h_output.height]; for(int j = 0; j < h_output.height; ++j) h_input.elements[j] = h_output.elements[j]; // free memory.. if(i == n->numLayer -1 ) { for(int k = 0; k < h_output.height; ++k) output.elements[k] = h_output.elements[k]; } delete[] h_output.elements; cudaFree(d_input.elements); cudaFree(d_outTemp.elements); cudaFree(d_bias.elements); cudaFree(d_weight.elements); } // copy output to return }
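// A minimal standalone sketch of the per-layer step that forwardGpu above performs:
// y = sigmoid(x*W + b) for a single input vector. It deliberately uses flat float
// arrays instead of the Matrix/Net structs from matrix.h/layer.h (not shown here),
// so denseSigmoid and its layout choices are illustrative assumptions, not the
// repository's API.
#include <cuda_runtime.h>
#include <cmath>
#include <cstdio>

__global__ void denseSigmoid(const float* x, const float* W, const float* b,
                             float* y, int inDim, int outDim)
{
    int o = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per output unit
    if (o < outDim) {
        float acc = b[o];
        for (int i = 0; i < inDim; ++i)
            acc += x[i] * W[i * outDim + o];         // W stored row-major, inDim x outDim
        y[o] = 1.0f / (1.0f + expf(-acc));           // same sigmoid as AddSigmoidKernel
    }
}

int main()
{
    const int inDim = 4, outDim = 3;
    float hx[inDim] = {1.f, 2.f, 3.f, 4.f};
    float hW[inDim * outDim], hb[outDim] = {0.1f, 0.2f, 0.3f}, hy[outDim];
    for (int i = 0; i < inDim * outDim; ++i) hW[i] = 0.01f * i;

    float *dx, *dW, *db, *dy;
    cudaMalloc(&dx, sizeof(hx));  cudaMalloc(&dW, sizeof(hW));
    cudaMalloc(&db, sizeof(hb));  cudaMalloc(&dy, sizeof(hy));
    cudaMemcpy(dx, hx, sizeof(hx), cudaMemcpyHostToDevice);
    cudaMemcpy(dW, hW, sizeof(hW), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, sizeof(hb), cudaMemcpyHostToDevice);

    denseSigmoid<<<1, 32>>>(dx, dW, db, dy, inDim, outDim);
    cudaMemcpy(hy, dy, sizeof(hy), cudaMemcpyDeviceToHost);
    for (int o = 0; o < outDim; ++o) printf("y[%d] = %f\n", o, hy[o]);

    cudaFree(dx); cudaFree(dW); cudaFree(db); cudaFree(dy);
    return 0;
}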
bf7bf2c84fa04256196f4773d07e3ac959114ec6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Simple demonstration of hipcub::BlockReduce * * To compile using the command line: * nvcc -arch=sm_XX example_block_reduce.cu -I../.. 
-lcudart -O3 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <iostream> #include <hipcub/hipcub.hpp> #include <cub/block/block_store.cuh> #include <hipcub/hipcub.hpp> #include "../../test/test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- /// Verbose output bool g_verbose = false; /// Timing iterations int g_timing_iterations = 100; /// Default grid size int g_grid_size = 1; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide exclusive prefix sum over integers */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM> __global__ void BlockSumKernel( int *d_in, // Tile of input int *d_out, // Tile aggregate clock_t *d_elapsed) // Elapsed cycle count of block reduction { // Specialize BlockReduce type for our thread block typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT; // Shared memory __shared__ typename BlockReduceT::TempStorage temp_storage; // Per-thread tile data int data[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data); // Start cycle timer clock_t start = clock(); // Compute sum int aggregate = BlockReduceT(temp_storage).Sum(data); // Stop cycle timer clock_t stop = clock(); // Store aggregate and elapsed clocks if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? start - stop : stop - start; *d_out = aggregate; } } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize reduction problem (and solution). * Returns the aggregate */ int Initialize(int *h_in, int num_items) { int inclusive = 0; for (int i = 0; i < num_items; ++i) { h_in[i] = i % 17; inclusive += h_in[i]; } return inclusive; } /** * Test thread block reduction */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM> void Test() { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Allocate host arrays int *h_in = new int[TILE_SIZE]; int *h_gpu = new int[TILE_SIZE + 1]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, TILE_SIZE); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; clock_t *d_elapsed = NULL; hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); hipMalloc((void**)&d_out, sizeof(int) * 1); hipMalloc((void**)&d_elapsed, sizeof(clock_t)); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < TILE_SIZE; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Kernel props int max_sm_occupancy; CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS)); // Copy problem to device hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice); printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n", (ALGORITHM == BLOCK_REDUCE_RAKING) ? 
"BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS", TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy); // Run aggregate/prefix kernel hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0, d_in, d_out, d_elapsed); // Check total aggregate printf("\tAggregate: "); int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Run this several times and average the performance results GpuTimer timer; float elapsed_millis = 0.0; clock_t elapsed_clocks = 0; for (int i = 0; i < g_timing_iterations; ++i) { // Copy problem to device hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice); timer.Start(); // Run aggregate/prefix kernel hipLaunchKernelGGL(( BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>), dim3(g_grid_size), dim3(BLOCK_THREADS), 0, 0, d_in, d_out, d_elapsed); timer.Stop(); elapsed_millis += timer.ElapsedMillis(); // Copy clocks from device clock_t clocks; CubDebugExit(hipMemcpy(&clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost)); elapsed_clocks += clocks; } // Check for kernel errors and STDIO from the kernel, if any CubDebugExit(hipPeekAtLastError()); CubDebugExit(hipDeviceSynchronize()); // Display timing results float avg_millis = elapsed_millis / g_timing_iterations; float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f; float avg_clocks = float(elapsed_clocks) / g_timing_iterations; float avg_clocks_per_item = avg_clocks / TILE_SIZE; printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks); printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item); printf("\tAverage kernel millis: %.4f\n", avg_millis); printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec); // Cleanup if (h_in) delete[] h_in; if (h_gpu) delete[] h_gpu; if (d_in) hipFree(d_in); if (d_out) hipFree(d_out); if (d_elapsed) hipFree(d_elapsed); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("grid-size", g_grid_size); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--i=<timing iterations>] " "[--grid-size=<grid size>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Run tests Test<1024, 1, BLOCK_REDUCE_RAKING>(); Test<512, 2, BLOCK_REDUCE_RAKING>(); Test<256, 4, BLOCK_REDUCE_RAKING>(); Test<128, 8, BLOCK_REDUCE_RAKING>(); Test<64, 16, BLOCK_REDUCE_RAKING>(); Test<32, 32, BLOCK_REDUCE_RAKING>(); Test<16, 64, BLOCK_REDUCE_RAKING>(); printf("-------------\n"); Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>(); return 0; }
bf7bf2c84fa04256196f4773d07e3ac959114ec6.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Simple demonstration of cub::BlockReduce * * To compile using the command line: * nvcc -arch=sm_XX example_block_reduce.cu -I../.. 
-lcudart -O3 * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <iostream> #include <cub/block/block_load.cuh> #include <cub/block/block_store.cuh> #include <cub/block/block_reduce.cuh> #include "../../test/test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- /// Verbose output bool g_verbose = false; /// Timing iterations int g_timing_iterations = 100; /// Default grid size int g_grid_size = 1; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide exclusive prefix sum over integers */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM> __global__ void BlockSumKernel( int *d_in, // Tile of input int *d_out, // Tile aggregate clock_t *d_elapsed) // Elapsed cycle count of block reduction { // Specialize BlockReduce type for our thread block typedef BlockReduce<int, BLOCK_THREADS, ALGORITHM> BlockReduceT; // Shared memory __shared__ typename BlockReduceT::TempStorage temp_storage; // Per-thread tile data int data[ITEMS_PER_THREAD]; LoadDirectStriped<BLOCK_THREADS>(threadIdx.x, d_in, data); // Start cycle timer clock_t start = clock(); // Compute sum int aggregate = BlockReduceT(temp_storage).Sum(data); // Stop cycle timer clock_t stop = clock(); // Store aggregate and elapsed clocks if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? start - stop : stop - start; *d_out = aggregate; } } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize reduction problem (and solution). * Returns the aggregate */ int Initialize(int *h_in, int num_items) { int inclusive = 0; for (int i = 0; i < num_items; ++i) { h_in[i] = i % 17; inclusive += h_in[i]; } return inclusive; } /** * Test thread block reduction */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockReduceAlgorithm ALGORITHM> void Test() { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Allocate host arrays int *h_in = new int[TILE_SIZE]; int *h_gpu = new int[TILE_SIZE + 1]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, TILE_SIZE); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; clock_t *d_elapsed = NULL; cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); cudaMalloc((void**)&d_out, sizeof(int) * 1); cudaMalloc((void**)&d_elapsed, sizeof(clock_t)); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < TILE_SIZE; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Kernel props int max_sm_occupancy; CubDebugExit(MaxSmOccupancy(max_sm_occupancy, BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM>, BLOCK_THREADS)); // Copy problem to device cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); printf("BlockReduce algorithm %s on %d items (%d timing iterations, %d blocks, %d threads, %d items per thread, %d SM occupancy):\n", (ALGORITHM == BLOCK_REDUCE_RAKING) ? 
"BLOCK_REDUCE_RAKING" : "BLOCK_REDUCE_WARP_REDUCTIONS", TILE_SIZE, g_timing_iterations, g_grid_size, BLOCK_THREADS, ITEMS_PER_THREAD, max_sm_occupancy); // Run aggregate/prefix kernel BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>( d_in, d_out, d_elapsed); // Check total aggregate printf("\tAggregate: "); int compare = CompareDeviceResults(&h_aggregate, d_out, 1, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Run this several times and average the performance results GpuTimer timer; float elapsed_millis = 0.0; clock_t elapsed_clocks = 0; for (int i = 0; i < g_timing_iterations; ++i) { // Copy problem to device cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); timer.Start(); // Run aggregate/prefix kernel BlockSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD, ALGORITHM><<<g_grid_size, BLOCK_THREADS>>>( d_in, d_out, d_elapsed); timer.Stop(); elapsed_millis += timer.ElapsedMillis(); // Copy clocks from device clock_t clocks; CubDebugExit(cudaMemcpy(&clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost)); elapsed_clocks += clocks; } // Check for kernel errors and STDIO from the kernel, if any CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Display timing results float avg_millis = elapsed_millis / g_timing_iterations; float avg_items_per_sec = float(TILE_SIZE * g_grid_size) / avg_millis / 1000.0f; float avg_clocks = float(elapsed_clocks) / g_timing_iterations; float avg_clocks_per_item = avg_clocks / TILE_SIZE; printf("\tAverage BlockReduce::Sum clocks: %.3f\n", avg_clocks); printf("\tAverage BlockReduce::Sum clocks per item: %.3f\n", avg_clocks_per_item); printf("\tAverage kernel millis: %.4f\n", avg_millis); printf("\tAverage million items / sec: %.4f\n", avg_items_per_sec); // Cleanup if (h_in) delete[] h_in; if (h_gpu) delete[] h_gpu; if (d_in) cudaFree(d_in); if (d_out) cudaFree(d_out); if (d_elapsed) cudaFree(d_elapsed); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("grid-size", g_grid_size); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--i=<timing iterations>] " "[--grid-size=<grid size>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Run tests Test<1024, 1, BLOCK_REDUCE_RAKING>(); Test<512, 2, BLOCK_REDUCE_RAKING>(); Test<256, 4, BLOCK_REDUCE_RAKING>(); Test<128, 8, BLOCK_REDUCE_RAKING>(); Test<64, 16, BLOCK_REDUCE_RAKING>(); Test<32, 32, BLOCK_REDUCE_RAKING>(); Test<16, 64, BLOCK_REDUCE_RAKING>(); printf("-------------\n"); Test<1024, 1, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<512, 2, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<256, 4, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<128, 8, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<64, 16, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<32, 32, BLOCK_REDUCE_WARP_REDUCTIONS>(); Test<16, 64, BLOCK_REDUCE_WARP_REDUCTIONS>(); return 0; }
5ac606f3705e448fe5971b2c9f9fa325bccd9f9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include "paddle/fluid/operators/cvm_op.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T> __global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width, const T* X, T* Y, int64_t numel) { CUDA_KERNEL_LOOP(i, numel) { if (use_cvm) { if (i % item_width == 0) { Y[i] = log(X[i] + 1); } else if (i % item_width == 1) { Y[i] = log(X[i] + 1) - log(X[i - 1] + 1); } else { Y[i] = X[i]; } } else { Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2]; } } } template <typename T> __global__ void CvmGradComputeKernel(const bool use_cvm, const int64_t item_width, const T* CVM, const T* DY, T* DX, bool has_lod, const size_t* lod, size_t lod_size, int64_t numel) { CUDA_KERNEL_LOOP(i, numel) { int offset = i % item_width; if (offset <= 1) { int cvm_id = i / item_width; if (has_lod) { int low = 1; int high = lod_size - 1; while (low < high) { int mid = (low + high) / 2; if (cvm_id < lod[mid]) high = mid; else low = mid + 1; } cvm_id = low - 1; } DX[i] = CVM[2 * cvm_id + offset]; } else { if (use_cvm) { DX[i] = DY[i]; } else { DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2]; } } } } template <typename T> class CVMCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const auto* x = context.Input<LoDTensor>("X"); const T* x_data = x->data<T>(); auto batch_size = x->dims()[0]; auto numel = x->numel(); auto item_size = numel / batch_size; auto use_cvm = context.Attr<bool>("use_cvm"); auto* y = context.Output<LoDTensor>("Y"); T* y_data = y->mutable_data<T>(context.GetPlace()); // for Input X do not have Lod Information. 
auto stream = context.template device_context<phi::GPUContext>().stream(); if (x->NumLevels() == 0) { hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, use_cvm, item_size, x_data, y_data, y->numel()); } else { auto lod = x->lod()[0]; PADDLE_ENFORCE_EQ( batch_size, lod[lod.size() - 1], platform::errors::PreconditionNotMet( "Input(X)'s dim[0] must be equal to last element of lod")); hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, use_cvm, item_size, x_data, y_data, y->numel()); } } }; template <typename T> class CVMGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* dx = context.Output<LoDTensor>(framework::GradVarName("X")); T* dx_data = dx->mutable_data<T>(context.GetPlace()); const Tensor* cvm = context.Input<Tensor>("CVM"); const T* cvm_data = cvm->data<T>(); const auto* dOut = context.Input<framework::LoDTensor>(framework::GradVarName("Y")); const T* dout_data = dOut->data<T>(); auto use_cvm = context.Attr<bool>("use_cvm"); auto offset = 2; auto batch_size = dx->dims()[0]; auto dx_numel = dx->numel(); auto item_size = dx_numel / batch_size; // for Input X do not have Lod Information. auto stream = context.template device_context<phi::GPUContext>().stream(); if (dx->NumLevels() == 0) { hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0, dx_numel); } else { auto lod = dx->lod()[0]; PADDLE_ENFORCE_EQ( batch_size, lod[lod.size() - 1], platform::errors::PreconditionNotMet( "Output(X@GRAD)'s dim[0] must be equal to last element of lod")); paddle::framework::MixVector<size_t> mixv_lod(&lod); hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, use_cvm, item_size, cvm_data, dout_data, dx_data, true, mixv_lod.CUDAData(context.GetPlace()), lod.size(), dx_numel); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>, ops::CVMCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>, ops::CVMGradCUDAKernel<double>);
5ac606f3705e448fe5971b2c9f9fa325bccd9f9e.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include "paddle/fluid/operators/cvm_op.h" #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using platform::PADDLE_CUDA_NUM_THREADS; using Tensor = framework::Tensor; using LoDTensor = framework::LoDTensor; template <typename T> __global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width, const T* X, T* Y, int64_t numel) { CUDA_KERNEL_LOOP(i, numel) { if (use_cvm) { if (i % item_width == 0) { Y[i] = log(X[i] + 1); } else if (i % item_width == 1) { Y[i] = log(X[i] + 1) - log(X[i - 1] + 1); } else { Y[i] = X[i]; } } else { Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2]; } } } template <typename T> __global__ void CvmGradComputeKernel(const bool use_cvm, const int64_t item_width, const T* CVM, const T* DY, T* DX, bool has_lod, const size_t* lod, size_t lod_size, int64_t numel) { CUDA_KERNEL_LOOP(i, numel) { int offset = i % item_width; if (offset <= 1) { int cvm_id = i / item_width; if (has_lod) { int low = 1; int high = lod_size - 1; while (low < high) { int mid = (low + high) / 2; if (cvm_id < lod[mid]) high = mid; else low = mid + 1; } cvm_id = low - 1; } DX[i] = CVM[2 * cvm_id + offset]; } else { if (use_cvm) { DX[i] = DY[i]; } else { DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2]; } } } } template <typename T> class CVMCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const auto* x = context.Input<LoDTensor>("X"); const T* x_data = x->data<T>(); auto batch_size = x->dims()[0]; auto numel = x->numel(); auto item_size = numel / batch_size; auto use_cvm = context.Attr<bool>("use_cvm"); auto* y = context.Output<LoDTensor>("Y"); T* y_data = y->mutable_data<T>(context.GetPlace()); // for Input X do not have Lod Information. 
auto stream = context.template device_context<phi::GPUContext>().stream(); if (x->NumLevels() == 0) { CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( use_cvm, item_size, x_data, y_data, y->numel()); } else { auto lod = x->lod()[0]; PADDLE_ENFORCE_EQ( batch_size, lod[lod.size() - 1], platform::errors::PreconditionNotMet( "Input(X)'s dim[0] must be equal to last element of lod")); CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( use_cvm, item_size, x_data, y_data, y->numel()); } } }; template <typename T> class CVMGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { auto* dx = context.Output<LoDTensor>(framework::GradVarName("X")); T* dx_data = dx->mutable_data<T>(context.GetPlace()); const Tensor* cvm = context.Input<Tensor>("CVM"); const T* cvm_data = cvm->data<T>(); const auto* dOut = context.Input<framework::LoDTensor>(framework::GradVarName("Y")); const T* dout_data = dOut->data<T>(); auto use_cvm = context.Attr<bool>("use_cvm"); auto offset = 2; auto batch_size = dx->dims()[0]; auto dx_numel = dx->numel(); auto item_size = dx_numel / batch_size; // for Input X do not have Lod Information. auto stream = context.template device_context<phi::GPUContext>().stream(); if (dx->NumLevels() == 0) { CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0, dx_numel); } else { auto lod = dx->lod()[0]; PADDLE_ENFORCE_EQ( batch_size, lod[lod.size() - 1], platform::errors::PreconditionNotMet( "Output(X@GRAD)'s dim[0] must be equal to last element of lod")); paddle::framework::MixVector<size_t> mixv_lod(&lod); CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(use_cvm, item_size, cvm_data, dout_data, dx_data, true, mixv_lod.CUDAData(context.GetPlace()), lod.size(), dx_numel); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>, ops::CVMCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>, ops::CVMGradCUDAKernel<double>);
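// A self-contained sketch of the element-wise transform done by CvmComputeKernel
// when use_cvm is true, with a hand-written grid-stride loop standing in for
// Paddle's CUDA_KERNEL_LOOP macro; cvm_forward and the item_width = 4 test data
// are illustrative, not Paddle's public API.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void cvm_forward(const float* X, float* Y, int item_width, int numel)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel;
         i += blockDim.x * gridDim.x) {                      // grid-stride loop
        int col = i % item_width;
        if (col == 0)
            Y[i] = logf(X[i] + 1.f);                         // first column: log(x + 1)
        else if (col == 1)
            Y[i] = logf(X[i] + 1.f) - logf(X[i - 1] + 1.f);  // second column: difference of logs, as in the kernel above
        else
            Y[i] = X[i];                                     // remaining columns pass through unchanged
    }
}

int main()
{
    const int item_width = 4, rows = 2, numel = rows * item_width;
    float h_x[numel] = {3.f, 1.f, 0.5f, 0.25f, 7.f, 2.f, 0.1f, 0.9f}, h_y[numel];

    float *d_x, *d_y;
    cudaMalloc(&d_x, numel * sizeof(float));
    cudaMalloc(&d_y, numel * sizeof(float));
    cudaMemcpy(d_x, h_x, numel * sizeof(float), cudaMemcpyHostToDevice);

    cvm_forward<<<1, 64>>>(d_x, d_y, item_width, numel);
    cudaMemcpy(h_y, d_y, numel * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < numel; ++i)
        printf("%0.4f%c", h_y[i], (i % item_width == item_width - 1) ? '\n' : ' ');

    cudaFree(d_x); cudaFree(d_y);
    return 0;
}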
93fa09dc2e147a78f34c2b55a011b264d1491a5a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> __global__ void global_reduce_kernel(float * d_out, float * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // do reduction in global mem for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { d_in[myId] += d_in[myId + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId]; } } __global__ void shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory) { // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_intermediate, d_in); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_intermediate, d_in); } // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; if (usesSharedMemory) { hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_out, d_intermediate); } else { hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0, d_out, d_intermediate); } } int main(int argc, char **argv) { int deviceCount; hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; hipSetDevice(dev); hipDeviceProp_t devProps; if (hipGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %d B; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 1 << 10; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float sum = 0.0f; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [-1.0f, 1.0f] h_in[i] = -1.0f + (float)random()/((float)RAND_MAX/2.0f); //h_in[i] = i; //printf("%f\n", h_in[i]); sum += h_in[i]; } printf("sum using serial reduce: %f\n", sum); // declare GPU memory pointers float * d_in, * d_intermediate, * d_out; // allocate GPU memory hipMalloc((void **) &d_in, ARRAY_BYTES); hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated hipMalloc((void **) &d_out, sizeof(float)); // transfer the input array to the GPU hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = 
atoi(argv[1]); } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); const int itt = 2; // launch the kernel switch(whichKernel) { case 0: printf("Running global reduce\n"); hipEventRecord(start, 0); for (int i = 0; i < itt; i++) { hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice); reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false); } hipEventRecord(stop, 0); break; case 1: printf("Running reduce with shared mem\n"); hipEventRecord(start, 0); for (int i = 0; i < itt; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true); } hipEventRecord(stop, 0); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); elapsedTime /= (float)itt; // 100 trials // copy back the sum from GPU float h_out; hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost); printf("sum using kernel: %f\n", h_out); printf("average time elapsed: %f\n", elapsedTime); // free GPU memory allocation hipFree(d_in); hipFree(d_intermediate); hipFree(d_out); return 0; }
93fa09dc2e147a78f34c2b55a011b264d1491a5a.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> __global__ void global_reduce_kernel(float * d_out, float * d_in) { int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // do reduction in global mem for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { d_in[myId] += d_in[myId + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = d_in[myId]; } } __global__ void shmem_reduce_kernel(float * d_out, const float * d_in) { // sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>> extern __shared__ float sdata[]; int myId = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; // load shared mem from global mem sdata[tid] = d_in[myId]; __syncthreads(); // make sure entire block is loaded! // do reduction in shared mem for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { sdata[tid] += sdata[tid + s]; } __syncthreads(); // make sure all adds at one stage are done! } // only thread 0 writes result for this block back to global mem if (tid == 0) { d_out[blockIdx.x] = sdata[0]; } } void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory) { // assumes that size is not greater than maxThreadsPerBlock^2 // and that size is a multiple of maxThreadsPerBlock const int maxThreadsPerBlock = 1024; int threads = maxThreadsPerBlock; int blocks = size / maxThreadsPerBlock; if (usesSharedMemory) { shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_intermediate, d_in); } else { global_reduce_kernel<<<blocks, threads>>> (d_intermediate, d_in); } // now we're down to one block left, so reduce it threads = blocks; // launch one thread for each block in prev step blocks = 1; if (usesSharedMemory) { shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>> (d_out, d_intermediate); } else { global_reduce_kernel<<<blocks, threads>>> (d_out, d_intermediate); } } int main(int argc, char **argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "error: no devices supporting CUDA.\n"); exit(EXIT_FAILURE); } int dev = 0; cudaSetDevice(dev); cudaDeviceProp devProps; if (cudaGetDeviceProperties(&devProps, dev) == 0) { printf("Using device %d:\n", dev); printf("%s; global mem: %d B; compute v%d.%d; clock: %d kHz\n", devProps.name, (int)devProps.totalGlobalMem, (int)devProps.major, (int)devProps.minor, (int)devProps.clockRate); } const int ARRAY_SIZE = 1 << 10; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // generate the input array on the host float h_in[ARRAY_SIZE]; float sum = 0.0f; for(int i = 0; i < ARRAY_SIZE; i++) { // generate random float in [-1.0f, 1.0f] h_in[i] = -1.0f + (float)random()/((float)RAND_MAX/2.0f); //h_in[i] = i; //printf("%f\n", h_in[i]); sum += h_in[i]; } printf("sum using serial reduce: %f\n", sum); // declare GPU memory pointers float * d_in, * d_intermediate, * d_out; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated cudaMalloc((void **) &d_out, sizeof(float)); // transfer the input array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); int whichKernel = 0; if (argc == 2) { whichKernel = atoi(argv[1]); } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); const int itt = 2; // launch the kernel switch(whichKernel) { case 0: printf("Running global reduce\n"); 
cudaEventRecord(start, 0); for (int i = 0; i < itt; i++) { cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false); } cudaEventRecord(stop, 0); break; case 1: printf("Running reduce with shared mem\n"); cudaEventRecord(start, 0); for (int i = 0; i < itt; i++) { reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true); } cudaEventRecord(stop, 0); break; default: fprintf(stderr, "error: ran no kernel\n"); exit(EXIT_FAILURE); } cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); elapsedTime /= (float)itt; // 100 trials // copy back the sum from GPU float h_out; cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost); printf("sum using kernel: %f\n", h_out); printf("average time elapsed: %f\n", elapsedTime); // free GPU memory allocation cudaFree(d_in); cudaFree(d_intermediate); cudaFree(d_out); return 0; }
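// An alternative single-warp reduction sketch using __shfl_down_sync (CUDA 9+),
// shown only as a contrast to the global- and shared-memory kernels above;
// warpReduceSum and warp_reduce_kernel are illustrative and not part of the
// original file.
#include <cstdio>
#include <cuda_runtime.h>

__inline__ __device__ float warpReduceSum(float val)
{
    // Each step folds the upper half of the active lanes onto the lower half;
    // after five steps lane 0 holds the warp total.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    return val;
}

__global__ void warp_reduce_kernel(const float* d_in, float* d_out)
{
    float v = d_in[threadIdx.x];       // one warp (32 threads), one element each
    v = warpReduceSum(v);
    if (threadIdx.x == 0) *d_out = v;
}

int main()
{
    float h_in[32], h_out = 0.f, ref = 0.f;
    for (int i = 0; i < 32; ++i) { h_in[i] = 0.5f * i; ref += h_in[i]; }

    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);

    warp_reduce_kernel<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("warp sum: %f (reference %f)\n", h_out, ref);

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}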
97eca37a98cffd82dcb7ce15719cd91d1e7aac76.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/manual_crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void ManualCropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int lines = top[0]->count() / top[0]->width(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, top[0]->height(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom_data + bottom[0]->offset(0, 0, x_, y_), top_data); } template <typename Dtype> void ManualCropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int lines = top[0]->count() / top[0]->width(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, top[0]->height(), top[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top_diff, bottom_diff + bottom[0]->offset(0, 0, x_, y_)); } } INSTANTIATE_LAYER_GPU_FUNCS(ManualCropLayer); } // namespace caffe
97eca37a98cffd82dcb7ce15719cd91d1e7aac76.cu
#include <vector> #include "caffe/layers/manual_crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void ManualCropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int lines = top[0]->count() / top[0]->width(); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, top[0]->height(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom_data + bottom[0]->offset(0, 0, x_, y_), top_data); } template <typename Dtype> void ManualCropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int lines = top[0]->count() / top[0]->width(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, top[0]->height(), top[0]->width(), top[0]->height() * top[0]->width(), top[0]->width(), bottom[0]->height() * bottom[0]->width(), bottom[0]->width(), top_diff, bottom_diff + bottom[0]->offset(0, 0, x_, y_)); } } INSTANTIATE_LAYER_GPU_FUNCS(ManualCropLayer); } // namespace caffe
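// A Caffe-free sketch of the same strided-copy idea as copy_kernel above: one
// thread per output row copies cropW contiguous values from a source row offset
// by (x0, y0). crop2d and its parameters are illustrative; Blob offsets and
// CAFFE_GET_BLOCKS are not reproduced here.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void crop2d(const float* src, float* dst,
                       int srcW, int cropH, int cropW, int x0, int y0)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;        // one output row per thread
    if (row < cropH) {
        const float* s = src + (x0 + row) * srcW + y0;      // strided walk into the source
        float* d = dst + row * cropW;                       // dense walk over the destination
        for (int i = 0; i < cropW; ++i) d[i] = s[i];
    }
}

int main()
{
    const int H = 4, W = 5, cropH = 2, cropW = 3, x0 = 1, y0 = 1;
    float h_src[H * W], h_dst[cropH * cropW];
    for (int i = 0; i < H * W; ++i) h_src[i] = (float)i;

    float *d_src, *d_dst;
    cudaMalloc(&d_src, sizeof(h_src));
    cudaMalloc(&d_dst, sizeof(h_dst));
    cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);

    crop2d<<<1, 32>>>(d_src, d_dst, W, cropH, cropW, x0, y0);
    cudaMemcpy(h_dst, d_dst, sizeof(h_dst), cudaMemcpyDeviceToHost);
    for (int r = 0; r < cropH; ++r) {
        for (int c = 0; c < cropW; ++c) printf("%4.0f", h_dst[r * cropW + c]);
        printf("\n");
    }

    cudaFree(d_src); cudaFree(d_dst);
    return 0;
}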
85f5f8ad6f45bc6b6690c2aef227929bd7701092.hip
// !!! This is a file automatically generated by hipify!!! //#pragma comment (lib, "cublas.lib") //#include "stdio.h" //#include <hip/hip_runtime.h> //using namespace std; //#include <ctime> //#include "hip/hip_runtime.h" //#include "hiprand/hiprand_kernel.h" //#include "device_launch_parameters.h" //#include <stdio.h> //#include <stdlib.h> // //#include <string> //#include <iomanip> //#include <time.h> //#include <iostream> //#include <cmath> //#include <math.h> // //#define TRAIN_NUM 30000 //#define TEST_NUM 10000 //#define ROW 28 //#define COL 28 //#define CONV_SIZE 24 //#define POOL_SIZE 12 //#define FC1_SIZE 5 //#define FC2_SIZE 10 //#define CONV_W_SIZE 5 //#define CONV_W_NUM 6 // //float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_b[CONV_W_NUM]; //float input[ROW][COL]; //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; // //__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //__device__ float _conv_b[CONV_W_NUM]; //__device__ float _input[ROW][COL]; //__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //__device__ float _conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; // //float get_rand(float fan_in) //{ // float sum = 0; // for (int i = 0;i < 12;i++) // sum += (float)rand() / RAND_MAX; // sum -= 6; // sum *= 1 / sqrt(fan_in); // return sum; //} //void init_params() //{ // for (int i = 0;i < CONV_W_NUM;i++) // { // for (int j = 0;j < CONV_W_SIZE;j++) // for (int k = 0;k < CONV_W_SIZE;k++) // conv_w[i][j][k] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // conv_b[i] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // } // // /*for (int i = 0;i < FC1_SIZE;i++) // { // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fc1_w[i][j][k][l] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // fc1_b[i] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // } // // for (int i = 0;i < FC2_SIZE;i++) // { // for (int j = 0;j < FC1_SIZE;j++) // fc2_w[i][j] = get_rand(FC1_SIZE); // fc2_b[i] = get_rand(FC1_SIZE); // }*/ //} // //float sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // //void input_conv() //{ // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < CONV_SIZE;j++) // for (int k = 0;k < CONV_SIZE;k++) // { // conv_z[i][j][k] = 0; // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // conv_z[i][j][k] += input[j + l][k + m] * conv_w[i][l][m]; // conv_z[i][j][k] += conv_b[i]; // conv_a[i][j][k] = sigmoid(conv_z[i][j][k]); // } //} // //__device__ float _sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // //__global__ void _input_conv() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE) // { // _conv_z[ix][iy][iz] = 0; // // #pragma unroll // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // _conv_z[ix][iy][iz] += _input[iy + l][iz + m] * _conv_w[ix][l][m]; // _conv_z[ix][iy][iz] += _conv_b[ix]; // _conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]); // } //} // //__global__ void _input_conv_reduce() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // // __shared__ int data[25]; // int tid = threadIdx.x; // //int i = 2*blockIdx.x * blockDim.x + threadIdx.x; // data[tid] = _input[blockIdx.x+iy][blockIdx.y+iz] * 
_conv_w[blockIdx.x][iy][iz]; // __syncthreads(); // for (int s = blockDim.x / 2; s > 0; s >>= 1) { // if (tid < s) // data[tid] += data[tid + s]; // __syncthreads(); // } // if (tid == 0) { // _conv_z[ix][iy][iz] = data[0]; // data[0] += _conv_b[ix]; // _conv_a[ix][iy][iz] = _sigmoid(data[0]); // } //} //int main() { // clock_t t = clock(); // // // cout << "-----------------------CPU------------------" << endl; // for (int j = 0;j < TRAIN_NUM;j++) // { // //input_conv(); // if (j && j % 100 == 0) // printf("Training Time spent : %.0fs Image count : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j); // } // printf("Training Time spent : %.0fs Image count : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM); // // cout << "-----------------------GPU------------------" << endl; // t = clock(); // //dim3 grid_input(1, 24, 24); // dim3 block_input(6, 24, 24); // dim3 grid_input(1, 5, 5); // for (int j = 0;j < TRAIN_NUM;j++) // { // _input_conv_reduce << <block_input, grid_input>> > (); // if (j && j % 100 == 0) // printf("Training Time spent : %.0fs Image count : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j); // } // printf("Training Time spent : %.0fs Image count : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM); // // // // return 0; //}
85f5f8ad6f45bc6b6690c2aef227929bd7701092.cu
//#pragma comment (lib, "cublas.lib") //#include "stdio.h" //#include <cuda.h> //using namespace std; //#include <ctime> //#include "cuda_runtime.h" //#include "curand_kernel.h" //#include "device_launch_parameters.h" //#include <stdio.h> //#include <stdlib.h> // //#include <string> //#include <iomanip> //#include <time.h> //#include <iostream> //#include <cmath> //#include <math.h> // //#define TRAIN_NUM 30000 //#define TEST_NUM 10000 //#define ROW 28 //#define COL 28 //#define CONV_SIZE 24 //#define POOL_SIZE 12 //#define FC1_SIZE 5 //#define FC2_SIZE 10 //#define CONV_W_SIZE 5 //#define CONV_W_NUM 6 // //float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_b[CONV_W_NUM]; //float input[ROW][COL]; //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; // //__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //__device__ float _conv_b[CONV_W_NUM]; //__device__ float _input[ROW][COL]; //__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //__device__ float _conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; // //float get_rand(float fan_in) //{ // float sum = 0; // for (int i = 0;i < 12;i++) // sum += (float)rand() / RAND_MAX; // sum -= 6; // sum *= 1 / sqrt(fan_in); // return sum; //} //void init_params() //{ // for (int i = 0;i < CONV_W_NUM;i++) // { // for (int j = 0;j < CONV_W_SIZE;j++) // for (int k = 0;k < CONV_W_SIZE;k++) // conv_w[i][j][k] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // conv_b[i] = get_rand(CONV_W_SIZE * CONV_W_SIZE); // } // // /*for (int i = 0;i < FC1_SIZE;i++) // { // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fc1_w[i][j][k][l] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // fc1_b[i] = get_rand(POOL_SIZE * POOL_SIZE * CONV_W_NUM); // } // // for (int i = 0;i < FC2_SIZE;i++) // { // for (int j = 0;j < FC1_SIZE;j++) // fc2_w[i][j] = get_rand(FC1_SIZE); // fc2_b[i] = get_rand(FC1_SIZE); // }*/ //} // //float sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // //void input_conv() //{ // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < CONV_SIZE;j++) // for (int k = 0;k < CONV_SIZE;k++) // { // conv_z[i][j][k] = 0; // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // conv_z[i][j][k] += input[j + l][k + m] * conv_w[i][l][m]; // conv_z[i][j][k] += conv_b[i]; // conv_a[i][j][k] = sigmoid(conv_z[i][j][k]); // } //} // //__device__ float _sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // //__global__ void _input_conv() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE) // { // _conv_z[ix][iy][iz] = 0; // // #pragma unroll // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // _conv_z[ix][iy][iz] += _input[iy + l][iz + m] * _conv_w[ix][l][m]; // _conv_z[ix][iy][iz] += _conv_b[ix]; // _conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]); // } //} // //__global__ void _input_conv_reduce() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // // __shared__ int data[25]; // int tid = threadIdx.x; // //int i = 2*blockIdx.x * blockDim.x + threadIdx.x; // data[tid] = _input[blockIdx.x+iy][blockIdx.y+iz] * _conv_w[blockIdx.x][iy][iz]; // __syncthreads(); // for (int s = blockDim.x / 2; s > 0; s >>= 
1) { // if (tid < s) // data[tid] += data[tid + s]; // __syncthreads(); // } // if (tid == 0) { // _conv_z[ix][iy][iz] = data[0]; // data[0] += _conv_b[ix]; // _conv_a[ix][iy][iz] = _sigmoid(data[0]); // } //} //int main() { // clock_t t = clock(); // // // cout << "-----------------------CPU------------------" << endl; // for (int j = 0;j < TRAIN_NUM;j++) // { // //input_conv(); // if (j && j % 100 == 0) // printf("Training Time spent : %.0fs Image count : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j); // } // printf("Training Time spent : %.0fs Image count : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM); // // cout << "-----------------------GPU------------------" << endl; // t = clock(); // //dim3 grid_input(1, 24, 24); // dim3 block_input(6, 24, 24); // dim3 grid_input(1, 5, 5); // for (int j = 0;j < TRAIN_NUM;j++) // { // _input_conv_reduce << <block_input, grid_input>> > (); // if (j && j % 100 == 0) // printf("Training Time spent : %.0fs Image count : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j); // } // printf("Training Time spent : %.0fs Image count : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM); // // // // return 0; //}
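// The file above is kept fully commented out; the sketch below is an uncommented,
// self-contained rendering of what its _input_conv kernel computes (a 5x5 valid
// convolution followed by a sigmoid), using plain pointer parameters instead of
// the __device__ globals. conv_forward, the launch shape, and the test data are
// illustrative assumptions only.
#include <cstdio>
#include <cuda_runtime.h>

#define ROW 28
#define COL 28
#define CONV_SIZE 24
#define CONV_W_SIZE 5
#define CONV_W_NUM 6

__global__ void conv_forward(const float* input, const float* w, const float* b, float* out)
{
    int f = blockIdx.z;                                     // filter index
    int y = blockIdx.y * blockDim.y + threadIdx.y;          // output row
    int x = blockIdx.x * blockDim.x + threadIdx.x;          // output column
    if (f < CONV_W_NUM && y < CONV_SIZE && x < CONV_SIZE) {
        float z = b[f];
        for (int l = 0; l < CONV_W_SIZE; ++l)
            for (int m = 0; m < CONV_W_SIZE; ++m)
                z += input[(y + l) * COL + (x + m)] * w[(f * CONV_W_SIZE + l) * CONV_W_SIZE + m];
        out[(f * CONV_SIZE + y) * CONV_SIZE + x] = 1.f / (1.f + expf(-z));
    }
}

int main()
{
    float h_in[ROW * COL], h_w[CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE];
    float h_b[CONV_W_NUM] = {0}, h_out[CONV_W_NUM * CONV_SIZE * CONV_SIZE];
    for (int i = 0; i < ROW * COL; ++i) h_in[i] = 0.01f * (i % 17);
    for (int i = 0; i < CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE; ++i) h_w[i] = 0.02f;

    float *d_in, *d_w, *d_b, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));   cudaMalloc(&d_w, sizeof(h_w));
    cudaMalloc(&d_b, sizeof(h_b));     cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_w, h_w, sizeof(h_w), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(h_b), cudaMemcpyHostToDevice);

    dim3 block(8, 8, 1), grid(3, 3, CONV_W_NUM);            // 3 * 8 = 24 covers CONV_SIZE
    conv_forward<<<grid, block>>>(d_in, d_w, d_b, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("out[0][0][0] = %f\n", h_out[0]);

    cudaFree(d_in); cudaFree(d_w); cudaFree(d_b); cudaFree(d_out);
    return 0;
}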
25317514c975745c0349cd4cb89dd660278d0eaa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdint.h> #include <math.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" __global__ void Blur(int height, int width, uint8_t *d_img, unsigned int *d_res) { int gidx = threadIdx.x + blockDim.x * blockIdx.x; int size = height * width; if (gidx < size) { unsigned char value = d_img[gidx]; int bin = value % 256; atomicAdd(&d_res[bin], 1); } } int main(int argc, char **argv) { int width, height, bpp, size; FILE *fp; fp = fopen("res.txt", "w"); uint8_t* h_img_0 = stbi_load("corgi.jpg", &width, &height, &bpp, 3); size = height * width; uint8_t* h_img = (uint8_t *) malloc(sizeof(uint8_t) * size); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_img[j*width + i] = (h_img_0[j*width*3 + i*3] + \ h_img_0[j*width*3 + i*3 + 1] + \ h_img_0[j*width*3 + i*3 + 2]) / 3.; } } uint8_t *d_img; unsigned int *d_res; unsigned int *h_res = (unsigned int *) malloc(sizeof(unsigned int) * 256); hipMalloc(&d_img, sizeof(uint8_t) * size); hipMalloc(&d_res, sizeof(unsigned int) * 256); hipMemset(d_res, 0, sizeof(unsigned int) * 256); hipMemcpy(d_img, h_img, sizeof(uint8_t) * size, hipMemcpyHostToDevice); int block_size, grid_size; block_size = 256; grid_size = size / block_size; dim3 dimBlock(block_size); dim3 dimGrid(grid_size); hipLaunchKernelGGL(( Blur), dim3(dimGrid), dim3(dimBlock), 0, 0, height, width, d_img, d_res); hipDeviceSynchronize(); hipMemcpy(h_res, d_res, sizeof(unsigned int) * 256, hipMemcpyDeviceToHost); for (int i = 0; i < 256; i++) { fprintf(fp, "%d\t", h_res[i]); } return 0; }
25317514c975745c0349cd4cb89dd660278d0eaa.cu
#include <stdio.h> #include <string.h> #include <stdint.h> #include <math.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" /* Despite its name, this kernel accumulates a 256-bin grayscale histogram of the image. */ __global__ void Blur(int height, int width, uint8_t *d_img, unsigned int *d_res) { int gidx = threadIdx.x + blockDim.x * blockIdx.x; int size = height * width; if (gidx < size) { unsigned char value = d_img[gidx]; int bin = value % 256; atomicAdd(&d_res[bin], 1); } } int main(int argc, char **argv) { int width, height, bpp, size; FILE *fp; fp = fopen("res.txt", "w"); uint8_t* h_img_0 = stbi_load("corgi.jpg", &width, &height, &bpp, 3); size = height * width; uint8_t* h_img = (uint8_t *) malloc(sizeof(uint8_t) * size); for (int i = 0; i < width; i++) { for (int j = 0; j < height; j++) { h_img[j*width + i] = (h_img_0[j*width*3 + i*3] + \ h_img_0[j*width*3 + i*3 + 1] + \ h_img_0[j*width*3 + i*3 + 2]) / 3.; } } uint8_t *d_img; unsigned int *d_res; unsigned int *h_res = (unsigned int *) malloc(sizeof(unsigned int) * 256); cudaMalloc(&d_img, sizeof(uint8_t) * size); cudaMalloc(&d_res, sizeof(unsigned int) * 256); cudaMemset(d_res, 0, sizeof(unsigned int) * 256); cudaMemcpy(d_img, h_img, sizeof(uint8_t) * size, cudaMemcpyHostToDevice); int block_size, grid_size; block_size = 256; grid_size = (size + block_size - 1) / block_size; /* round up so pixels in a partial final block are still counted */ dim3 dimBlock(block_size); dim3 dimGrid(grid_size); Blur<<<dimGrid, dimBlock>>>(height, width, d_img, d_res); cudaDeviceSynchronize(); cudaMemcpy(h_res, d_res, sizeof(unsigned int) * 256, cudaMemcpyDeviceToHost); for (int i = 0; i < 256; i++) { fprintf(fp, "%u\t", h_res[i]); } fclose(fp); return 0; }
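The Blur kernel in the pair above issues one global atomicAdd per pixel. A common refinement is to privatize the histogram in shared memory and flush it once per block; the sketch below is an illustrative alternative, not part of the original pair, and the kernel name hist256 and its launch shape are assumptions.

// Illustrative shared-memory (privatized) histogram, assuming 256-thread blocks
// so that each thread owns exactly one bin of the block-local histogram.
#include <stdint.h>

__global__ void hist256(const uint8_t *d_img, unsigned int *d_res, int size)
{
    __shared__ unsigned int local[256];
    local[threadIdx.x] = 0;                                 // blockDim.x == 256
    __syncthreads();

    // Grid-stride loop: any grid size covers every pixel, including a partial tail.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size;
         i += blockDim.x * gridDim.x)
        atomicAdd(&local[d_img[i]], 1u);
    __syncthreads();

    atomicAdd(&d_res[threadIdx.x], local[threadIdx.x]);     // one global atomic per bin per block
}
// Possible launch, reusing the buffers from the program above:
//   hist256<<<(size + 255) / 256, 256>>>(d_img, d_res, size);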
a5192b4e76d6485c0e439bb7fbd781791d5b63fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> // Define maximum number of vertices in the graph #define N 317080 #define EDGES 1049886 // Data structure to store graph struct Graph { // An array of pointers to Node to represent adjacency list struct Node* head[N+1]; }; // A data structure to store adjacency list nodes of the graph struct Node { int dest; struct Node* next; }; // data structure to store graph edges struct Edge { int src, dest; }; struct author{ int id; int co_auth; }; extern __managed__ struct Graph * graph ; extern __managed__ struct Node* newNode ; extern __managed__ struct author *auth_list; extern __managed__ int *dist_auth; // Function to create an adjacency list from specified edges __host__ void createGraph(struct Graph* graph, struct Edge edges[], int n) { unsigned i; // allocate memory for graph data structure //struct Graph* graph = (struct Graph*)malloc(sizeof(struct Graph)); // initialize head pointer for all vertices for (i = 0; i < N+1; i++){ graph->head[i] = NULL; } // add edges to the directed graph one by one for (i = 0; i < N+1; i++) { // get source and destination vertex int src = edges[i].src; int dest = edges[i].dest; // allocate new node of Adjacency List from src to dest hipMallocManaged(&newNode, sizeof(struct Node), (unsigned int)hipMemAttachGlobal); hipMemAdvise(newNode, sizeof(struct Node), hipMemAdviseSetAccessedBy, hipCpuDeviceId); //struct Node* newNode = (struct Node*)malloc(sizeof(struct Node)); newNode->dest = dest; // point new node to current head newNode->next = graph->head[src]; // point head pointer to new node graph->head[src] = newNode; // 2. allocate new node of Adjacency List from dest to src hipMallocManaged(&newNode, sizeof(struct Node), (unsigned int)hipMemAttachGlobal); hipMemAdvise(newNode, sizeof(struct Node), hipMemAdviseSetAccessedBy, hipCpuDeviceId); //newNode = (struct Node*)malloc(sizeof(struct Node)); newNode->dest = src; // point new node to current head newNode->next = graph->head[dest]; // change head pointer to point to the new node graph->head[dest] = newNode; } //return graph; } // Function to print adjacency list representation of graph __global__ void countAuth(struct Graph* graph,struct author *auth_list, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE int stride = blockDim.x * gridDim.x; int i; for (i = tid; i < n+1; i+=stride) { //printf("%d\n", tid+i); int co_auth = 0; // print current vertex and all ts neighbors struct Node* ptr = graph->head[i]; while (ptr != NULL) { //printf("(%d -> %d)\t", tid, ptr->dest); ptr = ptr->next; co_auth++; } auth_list[i].id = i; auth_list[i].co_auth = co_auth; //printf("\n"); } } __global__ void distAuth(struct author *auth_list, int *dist_auth, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE int stride = blockDim.x * gridDim.x; int i; for (i = tid; i < n+1; i+=stride) { int idx = auth_list[i].co_auth; atomicAdd(dist_auth + idx, 1); } } long get_vert(char *str){ char vert[20]; int space_count = 0; int num_vert=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 2){ vert[j] = str[i]; j++; } else if(space_count>2) break; i++; } vert[j] = '\0'; //printf("%s\n", vert); num_vert = atoi(vert); //printf("%d\n", num_vert); return num_vert; } int get_src(char *str){ char s[20]; int space_count = 0; int src=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 0){ s[j] = str[i]; j++; } else break; 
i++; } s[j] = '\0'; //printf("%s\n", s); src = atoi(s); //printf("%d\n", src); return src; } int get_dst(char *str){ char d[20]; int space_count = 0; int dst=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 1){ d[j] = str[i]; j++; } else if(space_count>1) break; i++; } d[j] = '\0'; //printf("%s\n", d); dst = atoi(d); //printf("%d\n", dst); return dst; } int comparator(const void *p, const void *q) { int l = ((struct author *)p)->co_auth; int r = ((struct author *)q)->co_auth; return (r - l); } // Directed Graph Implementation in C int main(void) { // input array containing edges of the graph (as per above diagram) // (x, y) pair in the array represents an edge from x to y struct Edge *edges; edges = (struct Edge *) calloc (EDGES, sizeof(struct Edge)); FILE *fp; char str[200]; const char* file = "dblp-co-authors.txt"; //const char* file = "test.txt"; fp = fopen(file, "r"); if (fp == NULL){ printf("Could not open file %s",file); return 1; } int vert, i=0; fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); //printf("%s", str); vert = get_vert(str); long src, dst; //new_graph(vert); //struct graph* gph = new_graph(vert); while (fgets(str, 200, fp) != NULL){ //printf("%s", str); src = get_src(str); dst = get_dst(str); edges[i].src = src; edges[i].dest = dst; i++; } printf("Edges copied....\n"); // calculate number of edges int n = sizeof(edges)/sizeof(edges[0]); hipMallocManaged(&graph, sizeof(struct Graph), (unsigned int)hipMemAttachGlobal); hipMemAdvise(graph, sizeof(struct Graph), hipMemAdviseSetAccessedBy, hipCpuDeviceId); createGraph(graph, edges, N); printf("Graph Created...\n"); int graph_size = N + 1; int block_size = 64; int grid_size = (graph_size + block_size - 1)/block_size; // Set device that we will use for our cuda code hipSetDevice(0); hipMallocManaged(&auth_list, graph_size * sizeof(struct author), (unsigned int)hipMemAttachGlobal); hipMemAdvise(auth_list, graph_size * sizeof(struct author), hipMemAdviseSetAccessedBy, hipCpuDeviceId); // print adjacency list representation of graph hipLaunchKernelGGL(( countAuth), dim3(grid_size), dim3(block_size), 0, 0, graph, auth_list, N); hipDeviceSynchronize(); /*for(i=0;i<N+1;i++){ printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); }*/ qsort((void*)auth_list, graph_size, sizeof(struct author), comparator); /*for(i=0;i<N+1;i++){ printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); }*/ int max = auth_list[0].co_auth; for(i=0;i<N+1;i++){ if(auth_list[i].co_auth == max) printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); } hipMallocManaged(&dist_auth, (max+1) * sizeof(int), (unsigned int)hipMemAttachGlobal); hipMemAdvise(dist_auth, (max+1) * sizeof(int), hipMemAdviseSetAccessedBy, hipCpuDeviceId); hipMemset(dist_auth, 0, (max+1)*sizeof(int)); graph_size = N + 1; block_size = 64; grid_size = (graph_size + block_size - 1)/block_size; hipLaunchKernelGGL(( distAuth), dim3(grid_size), dim3(block_size), 0, 0, auth_list, dist_auth, N); hipDeviceSynchronize(); for(i=0;i<=max;i++){ printf("Dist %d: %d\n", i, dist_auth[i]); } return 0; }
a5192b4e76d6485c0e439bb7fbd781791d5b63fc.cu
#include <stdio.h> #include <stdlib.h> // Define maximum number of vertices in the graph #define N 317080 #define EDGES 1049886 // Data structure to store graph struct Graph { // An array of pointers to Node to represent adjacency list struct Node* head[N+1]; }; // A data structure to store adjacency list nodes of the graph struct Node { int dest; struct Node* next; }; // data structure to store graph edges struct Edge { int src, dest; }; struct author{ int id; int co_auth; }; extern __managed__ struct Graph * graph ; extern __managed__ struct Node* newNode ; extern __managed__ struct author *auth_list; extern __managed__ int *dist_auth; // Function to create an adjacency list from specified edges __host__ void createGraph(struct Graph* graph, struct Edge edges[], int n) { unsigned i; // allocate memory for graph data structure //struct Graph* graph = (struct Graph*)malloc(sizeof(struct Graph)); // initialize head pointer for all vertices for (i = 0; i < N+1; i++){ graph->head[i] = NULL; } // add edges to the directed graph one by one for (i = 0; i < N+1; i++) { // get source and destination vertex int src = edges[i].src; int dest = edges[i].dest; // allocate new node of Adjacency List from src to dest cudaMallocManaged(&newNode, sizeof(struct Node), (unsigned int)cudaMemAttachGlobal); cudaMemAdvise(newNode, sizeof(struct Node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); //struct Node* newNode = (struct Node*)malloc(sizeof(struct Node)); newNode->dest = dest; // point new node to current head newNode->next = graph->head[src]; // point head pointer to new node graph->head[src] = newNode; // 2. allocate new node of Adjacency List from dest to src cudaMallocManaged(&newNode, sizeof(struct Node), (unsigned int)cudaMemAttachGlobal); cudaMemAdvise(newNode, sizeof(struct Node), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); //newNode = (struct Node*)malloc(sizeof(struct Node)); newNode->dest = src; // point new node to current head newNode->next = graph->head[dest]; // change head pointer to point to the new node graph->head[dest] = newNode; } //return graph; } // Function to print adjacency list representation of graph __global__ void countAuth(struct Graph* graph,struct author *auth_list, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE int stride = blockDim.x * gridDim.x; int i; for (i = tid; i < n+1; i+=stride) { //printf("%d\n", tid+i); int co_auth = 0; // print current vertex and all ts neighbors struct Node* ptr = graph->head[i]; while (ptr != NULL) { //printf("(%d -> %d)\t", tid, ptr->dest); ptr = ptr->next; co_auth++; } auth_list[i].id = i; auth_list[i].co_auth = co_auth; //printf("\n"); } } __global__ void distAuth(struct author *auth_list, int *dist_auth, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; // HERE int stride = blockDim.x * gridDim.x; int i; for (i = tid; i < n+1; i+=stride) { int idx = auth_list[i].co_auth; atomicAdd(dist_auth + idx, 1); } } long get_vert(char *str){ char vert[20]; int space_count = 0; int num_vert=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 2){ vert[j] = str[i]; j++; } else if(space_count>2) break; i++; } vert[j] = '\0'; //printf("%s\n", vert); num_vert = atoi(vert); //printf("%d\n", num_vert); return num_vert; } int get_src(char *str){ char s[20]; int space_count = 0; int src=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 0){ s[j] = str[i]; j++; } else break; i++; } s[j] = '\0'; //printf("%s\n", s); src = atoi(s); //printf("%d\n", src); 
return src; } int get_dst(char *str){ char d[20]; int space_count = 0; int dst=0; int i=0, j=0; while(str[i] != '\n'){ if(str[i] == ' ') space_count++; if(space_count == 1){ d[j] = str[i]; j++; } else if(space_count>1) break; i++; } d[j] = '\0'; //printf("%s\n", d); dst = atoi(d); //printf("%d\n", dst); return dst; } int comparator(const void *p, const void *q) { int l = ((struct author *)p)->co_auth; int r = ((struct author *)q)->co_auth; return (r - l); } // Directed Graph Implementation in C int main(void) { // input array containing edges of the graph (as per above diagram) // (x, y) pair in the array represents an edge from x to y struct Edge *edges; edges = (struct Edge *) calloc (EDGES, sizeof(struct Edge)); FILE *fp; char str[200]; const char* file = "dblp-co-authors.txt"; //const char* file = "test.txt"; fp = fopen(file, "r"); if (fp == NULL){ printf("Could not open file %s",file); return 1; } int vert, i=0; fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); fgets(str, 200, fp); //printf("%s", str); vert = get_vert(str); long src, dst; //new_graph(vert); //struct graph* gph = new_graph(vert); while (fgets(str, 200, fp) != NULL){ //printf("%s", str); src = get_src(str); dst = get_dst(str); edges[i].src = src; edges[i].dest = dst; i++; } printf("Edges copied....\n"); // calculate number of edges int n = sizeof(edges)/sizeof(edges[0]); cudaMallocManaged(&graph, sizeof(struct Graph), (unsigned int)cudaMemAttachGlobal); cudaMemAdvise(graph, sizeof(struct Graph), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); createGraph(graph, edges, N); printf("Graph Created...\n"); int graph_size = N + 1; int block_size = 64; int grid_size = (graph_size + block_size - 1)/block_size; // Set device that we will use for our cuda code cudaSetDevice(0); cudaMallocManaged(&auth_list, graph_size * sizeof(struct author), (unsigned int)cudaMemAttachGlobal); cudaMemAdvise(auth_list, graph_size * sizeof(struct author), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); // print adjacency list representation of graph countAuth<<<grid_size, block_size>>>(graph, auth_list, N); cudaDeviceSynchronize(); /*for(i=0;i<N+1;i++){ printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); }*/ qsort((void*)auth_list, graph_size, sizeof(struct author), comparator); /*for(i=0;i<N+1;i++){ printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); }*/ int max = auth_list[0].co_auth; for(i=0;i<N+1;i++){ if(auth_list[i].co_auth == max) printf("Author %d : %d\n",auth_list[i].id, auth_list[i].co_auth); } cudaMallocManaged(&dist_auth, (max+1) * sizeof(int), (unsigned int)cudaMemAttachGlobal); cudaMemAdvise(dist_auth, (max+1) * sizeof(int), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId); cudaMemset(dist_auth, 0, (max+1)*sizeof(int)); graph_size = N + 1; block_size = 64; grid_size = (graph_size + block_size - 1)/block_size; distAuth<<<grid_size, block_size>>>(auth_list, dist_auth, N); cudaDeviceSynchronize(); for(i=0;i<=max;i++){ printf("Dist %d: %d\n", i, dist_auth[i]); } return 0; }
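Two details in the co-authorship pair above are worth flagging: createGraph is called with N (the vertex count) but its insertion loop runs for (i = 0; i < N+1; i++) over the edges array, so only the first N+1 of the EDGES entries are ever inserted, and n = sizeof(edges)/sizeof(edges[0]) in main is taken on a pointer, so it is not the edge count. A minimal corrected builder is sketched below; it assumes the Graph/Node/Edge structs and the N/EDGES macros from the file above, and the function name createGraphFixed is a placeholder, not part of the original code.

// Hypothetical corrected builder (CUDA side). The functional changes from
// createGraph above are that it walks all n edges (EDGES of them when called
// from main) rather than N+1, and uses a local node pointer instead of the
// global managed newNode.
__host__ void createGraphFixed(struct Graph *graph, struct Edge edges[], int n)
{
    for (int i = 0; i < N + 1; i++) graph->head[i] = NULL;   // empty adjacency lists

    for (int i = 0; i < n; i++) {
        int src  = edges[i].src;
        int dest = edges[i].dest;

        struct Node *node;
        cudaMallocManaged(&node, sizeof(struct Node), cudaMemAttachGlobal);
        node->dest = dest;                                   // adjacency entry src -> dest
        node->next = graph->head[src];
        graph->head[src] = node;

        cudaMallocManaged(&node, sizeof(struct Node), cudaMemAttachGlobal);
        node->dest = src;                                    // undirected: also dest -> src
        node->next = graph->head[dest];
        graph->head[dest] = node;
    }
}
// The call site in main would then become: createGraphFixed(graph, edges, EDGES);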
68cef633c61b507b5abcfeab2a2b61be280b1591.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <gauge_field_order.h> namespace quda { /** Kernel argument struct */ template <typename OutOrder, typename InOrder> struct CopyGaugeArg { OutOrder out; const InOrder in; int volume; int faceVolumeCB[QUDA_MAX_DIM]; int nDim; CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume, const int *faceVolumeCB, int nDim) : out(out), in(in), volume(volume), nDim(nDim) { for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d]; } }; /** Generic CPU gauge reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { for (int x=0; x<arg.volume/2; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, d, parity); } } } } /** Generic CUDA gauge reordering and packing. Adopts a similar form as the CPU version, using the same inlined functions. */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> __global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volume/2) return; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, d, parity); } } } /** Generic CPU gauge ghost reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { for (int x=0; x<arg.faceVolumeCB[d]; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.loadGhost(in, x, d, parity); // assumes we are loading for (int i=0; i<length; i++) out[i] = in[i]; arg.out.saveGhost(out, x, d, parity); } } } } /** Generic CUDA kernel for copying the ghost zone. Adopts a similar form as the CPU version, using the same inlined functions. 
*/ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> __global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x = blockIdx.x * blockDim.x + threadIdx.x; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { if (x < arg.faceVolumeCB[d]) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.loadGhost(in, x, d, parity); // assumes we are loading for (int i=0; i<length; i++) out[i] = in[i]; arg.out.saveGhost(out, x, d, parity); } } } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost> class CopyGauge : Tunable { CopyGaugeArg<OutOrder,InOrder> arg; int size; private: int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool advanceGridDim(TuneParam &param) const { return false; } // Don't tune the grid dimensions. bool advanceBlockDim(TuneParam &param) const { bool advance = Tunable::advanceBlockDim(param); if (advance) param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); return advance; } public: CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg) : arg(arg) { int faceMax = 0; for (int d=0; d<arg.nDim; d++) { faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax; } size = isGhost ? faceMax : arg.volume/2; } virtual ~CopyGauge() { ; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (!isGhost) { hipLaunchKernelGGL(( copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } else { hipLaunchKernelGGL(( copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder>) , dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); } } TuneKey tuneKey() const { std::stringstream vol, aux; vol << arg.in.volumeCB; aux << "out_stride=" << arg.out.stride << ",in_stride=" << arg.in.stride; return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. 
std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); } long long flops() const { return 0; } long long bytes() const { int sites = 4*arg.volume/2; if (isGhost) { sites = 0; for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d]; } return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn) + arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) ); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB, int nDim, QudaFieldLocation location, int type) { CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim); if (location == QUDA_CPU_FIELD_LOCATION) { if (type == 0) copyGauge<FloatOut, FloatIn, length>(arg); #ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu copyGhost<FloatOut, FloatIn, length>(arg); #endif } else if (location == QUDA_CUDA_FIELD_LOCATION) { // first copy body if (type == 0) { CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg); gaugeCopier.apply(0); } #ifdef MULTI_GPU // now copy ghost CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg); ghostCopier.apply(0); #endif } else { errorQuda("Undefined field location %d for copyGauge", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location, FloatOut *Out, FloatOut **outGhost, int type) { int faceVolumeCB[QUDA_MAX_DIM]; for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface(); if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,19>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,18>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,12>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,8>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #ifdef GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,13>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,9>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", 
out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_FLOAT4_GAUGE_ORDER) { if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,12>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,8>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #ifdef GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,13>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,9>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGauge<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("QDP interface has not been built\n"); #endif } else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyGauge<FloatOut,FloatIn,length> (QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE copyGauge<FloatOut,FloatIn,length> (CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("CPS interface has not been built\n"); #endif } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGauge<FloatOut,FloatIn,length> (MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("MILC interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyGauge<FloatOut,FloatIn,length> (BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) { // reconstruction only supported on FloatN fields currently if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,19>(in, In, inGhost), out, location, Out, outGhost, type); } else { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,18>(in, In, inGhost), out, location, Out, outGhost, type); } } else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,12>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { 
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,8>(in, In, inGhost), out, location, Out, outGhost, type); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,13>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,9>(in, In, inGhost), out, location, Out, outGhost, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_FLOAT4_GAUGE_ORDER) { if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,12>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,8>(in, In, inGhost), out, location, Out, outGhost, type); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,13>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,9>(in, In, inGhost), out, location, Out, outGhost, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("QDP interface has not been built\n"); #endif } else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("CPS interface has not been built\n"); #endif } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("MILC interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", in.Order()); } } template <typename FloatOut, typename FloatIn> void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) { if (in.Ncolor() != 3 && out.Ncolor() != 3) { errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor()); } if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) { // we are doing gauge field packing copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type); } else { if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location); // we are doing momentum field 
packing if (in.Reconstruct() != QUDA_RECONSTRUCT_10 || out.Reconstruct() != QUDA_RECONSTRUCT_10) { errorQuda("Unsupported reconstruction types out=%d in=%d for momentum field", out.Reconstruct(), in.Reconstruct()); } int faceVolumeCB[QUDA_MAX_DIM]; for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface(); // momentum only currently supported on MILC and Float2 fields currently if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> > arg(FloatNOrder<FloatOut,10,2,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> > arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); #else errorQuda("MILC interface has not been built\n"); #endif } else { errorQuda("Gauge field orders %d not supported", in.Order()); } } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> > arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> > arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else { errorQuda("Gauge field orders %d not supported", in.Order()); } #else errorQuda("MILC interface has not been built\n"); #endif } else { errorQuda("Gauge field orders %d not supported", out.Order()); } } } // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out, void *In, void **ghostOut, void **ghostIn, int type) { if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGauge(out, in, location, (double*)Out, (double*)In, (double**)ghostOut, (double**)ghostIn, type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (double*)Out, (float*)In, (double**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (double*)Out, (short*)In, (double**)ghostOut, (short**)ghostIn, type); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGauge(out, in, location, (float*)Out, (double*)In, (float**)ghostOut, (double**)ghostIn, type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (float*)Out, (float*)In, (float**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (float*)Out, (short*)In, (float**)ghostOut, (short**)ghostIn, type); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyGauge(out, in, location, (short*)Out, (double*)In, (short**)ghostOut, (double**)ghostIn, type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (short*)Out, (float*)In, 
(short**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (short*)Out, (short*)In, (short**)ghostOut, (short**)ghostIn, type); } } } } // namespace quda
68cef633c61b507b5abcfeab2a2b61be280b1591.cu
#include <gauge_field_order.h> namespace quda { /** Kernel argument struct */ template <typename OutOrder, typename InOrder> struct CopyGaugeArg { OutOrder out; const InOrder in; int volume; int faceVolumeCB[QUDA_MAX_DIM]; int nDim; CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume, const int *faceVolumeCB, int nDim) : out(out), in(in), volume(volume), nDim(nDim) { for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d]; } }; /** Generic CPU gauge reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { for (int x=0; x<arg.volume/2; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, d, parity); } } } } /** Generic CUDA gauge reordering and packing. Adopts a similar form as the CPU version, using the same inlined functions. */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> __global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= arg.volume/2) return; RegTypeIn in[length]; RegTypeOut out[length]; arg.in.load(in, x, d, parity); for (int i=0; i<length; i++) out[i] = in[i]; arg.out.save(out, x, d, parity); } } } /** Generic CPU gauge ghost reordering and packing */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { for (int x=0; x<arg.faceVolumeCB[d]; x++) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.loadGhost(in, x, d, parity); // assumes we are loading for (int i=0; i<length; i++) out[i] = in[i]; arg.out.saveGhost(out, x, d, parity); } } } } /** Generic CUDA kernel for copying the ghost zone. Adopts a similar form as the CPU version, using the same inlined functions. */ template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> __global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) { typedef typename mapper<FloatIn>::type RegTypeIn; typedef typename mapper<FloatOut>::type RegTypeOut; int x = blockIdx.x * blockDim.x + threadIdx.x; for (int parity=0; parity<2; parity++) { for (int d=0; d<arg.nDim; d++) { if (x < arg.faceVolumeCB[d]) { RegTypeIn in[length]; RegTypeOut out[length]; arg.in.loadGhost(in, x, d, parity); // assumes we are loading for (int i=0; i<length; i++) out[i] = in[i]; arg.out.saveGhost(out, x, d, parity); } } } } template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost> class CopyGauge : Tunable { CopyGaugeArg<OutOrder,InOrder> arg; int size; private: int sharedBytesPerThread() const { return 0; } int sharedBytesPerBlock(const TuneParam &param) const { return 0 ;} bool advanceGridDim(TuneParam &param) const { return false; } // Don't tune the grid dimensions. 
bool advanceBlockDim(TuneParam &param) const { bool advance = Tunable::advanceBlockDim(param); if (advance) param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); return advance; } public: CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg) : arg(arg) { int faceMax = 0; for (int d=0; d<arg.nDim; d++) { faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax; } size = isGhost ? faceMax : arg.volume/2; } virtual ~CopyGauge() { ; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); if (!isGhost) { copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } else { copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder> <<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); } } TuneKey tuneKey() const { std::stringstream vol, aux; vol << arg.in.volumeCB; aux << "out_stride=" << arg.out.stride << ",in_stride=" << arg.in.stride; return TuneKey(vol.str(), typeid(*this).name(), aux.str()); } std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim. std::stringstream ps; ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), "; ps << "shared=" << param.shared_bytes; return ps.str(); } virtual void initTuneParam(TuneParam &param) const { Tunable::initTuneParam(param); param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); } /** sets default values for when tuning is disabled */ virtual void defaultTuneParam(TuneParam &param) const { Tunable::defaultTuneParam(param); param.grid = dim3( (size+param.block.x-1) / param.block.x, 1, 1); } long long flops() const { return 0; } long long bytes() const { int sites = 4*arg.volume/2; if (isGhost) { sites = 0; for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d]; } return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn) + arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) ); } }; template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder> void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB, int nDim, QudaFieldLocation location, int type) { CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim); if (location == QUDA_CPU_FIELD_LOCATION) { if (type == 0) copyGauge<FloatOut, FloatIn, length>(arg); #ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu copyGhost<FloatOut, FloatIn, length>(arg); #endif } else if (location == QUDA_CUDA_FIELD_LOCATION) { // first copy body if (type == 0) { CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg); gaugeCopier.apply(0); } #ifdef MULTI_GPU // now copy ghost CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg); ghostCopier.apply(0); #endif } else { errorQuda("Undefined field location %d for copyGauge", location); } } template <typename FloatOut, typename FloatIn, int length, typename InOrder> void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location, FloatOut *Out, FloatOut **outGhost, int type) { int faceVolumeCB[QUDA_MAX_DIM]; for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface(); if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,19>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), 
location, type); } else { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,18>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } } else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,12>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,8>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #ifdef GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,13>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,2,9>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_FLOAT4_GAUGE_ORDER) { if (out.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,12>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,8>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #ifdef GPU_STAGGERED_DIRAC } else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,13>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); } else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatOut,length,4,9>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order()); } } else if (out.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGauge<FloatOut,FloatIn,length> (QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("QDP interface has not been built\n"); #endif } else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyGauge<FloatOut,FloatIn,length> (QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE copyGauge<FloatOut,FloatIn,length> (CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("CPS interface has not been built\n"); #endif } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGauge<FloatOut,FloatIn,length> (MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), faceVolumeCB, out.Ndim(), location, type); #else errorQuda("MILC interface has not been built\n"); #endif } else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyGauge<FloatOut,FloatIn,length> (BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(), 
faceVolumeCB, out.Ndim(), location, type); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", out.Order()); } } template <typename FloatOut, typename FloatIn, int length> void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) { // reconstruction only supported on FloatN fields currently if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) { if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,19>(in, In, inGhost), out, location, Out, outGhost, type); } else { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,18>(in, In, inGhost), out, location, Out, outGhost, type); } } else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,12>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,8>(in, In, inGhost), out, location, Out, outGhost, type); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,13>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,9>(in, In, inGhost), out, location, Out, outGhost, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_FLOAT4_GAUGE_ORDER) { if (in.Reconstruct() == QUDA_RECONSTRUCT_12) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,12>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,8>(in, In, inGhost), out, location, Out, outGhost, type); #ifdef GPU_STAGGERED_DIRAC } else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,13>(in, In, inGhost), out, location, Out, outGhost, type); } else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) { copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,9>(in, In, inGhost), out, location, Out, outGhost, type); #endif } else { errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order()); } } else if (in.Order() == QUDA_QDP_GAUGE_ORDER) { #ifdef BUILD_QDP_INTERFACE copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("QDP interface has not been built\n"); #endif } else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) { #ifdef BUILD_QDPJIT_INTERFACE copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("QDPJIT interface has not been built\n"); #endif } else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) { #ifdef BUILD_CPS_INTERFACE copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("CPS interface has not been built\n"); #endif } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, 
inGhost), out, location, Out, outGhost, type); #else errorQuda("MILC interface has not been built\n"); #endif } else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) { #ifdef BUILD_BQCD_INTERFACE copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost), out, location, Out, outGhost, type); #else errorQuda("BQCD interface has not been built\n"); #endif } else { errorQuda("Gauge field %d order not supported", in.Order()); } } template <typename FloatOut, typename FloatIn> void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) { if (in.Ncolor() != 3 && out.Ncolor() != 3) { errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor()); } if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) { // we are doing gauge field packing copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type); } else { if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location); // we are doing momentum field packing if (in.Reconstruct() != QUDA_RECONSTRUCT_10 || out.Reconstruct() != QUDA_RECONSTRUCT_10) { errorQuda("Unsupported reconstruction types out=%d in=%d for momentum field", out.Reconstruct(), in.Reconstruct()); } int faceVolumeCB[QUDA_MAX_DIM]; for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface(); // momentum only currently supported on MILC and Float2 fields currently if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) { if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> > arg(FloatNOrder<FloatOut,10,2,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> > arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); #else errorQuda("MILC interface has not been built\n"); #endif } else { errorQuda("Gauge field orders %d not supported", in.Order()); } } else if (out.Order() == QUDA_MILC_GAUGE_ORDER) { #ifdef BUILD_MILC_INTERFACE if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) { CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> > arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else if (in.Order() == QUDA_MILC_GAUGE_ORDER) { CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> > arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim()); copyGauge<FloatOut,FloatIn,10>(arg); } else { errorQuda("Gauge field orders %d not supported", in.Order()); } #else errorQuda("MILC interface has not been built\n"); #endif } else { errorQuda("Gauge field orders %d not supported", out.Order()); } } } // this is the function that is actually called, from here on down we instantiate all required templates void copyGenericGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, void *Out, void *In, void **ghostOut, void **ghostIn, int type) { if (out.Precision() == QUDA_DOUBLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGauge(out, in, location, (double*)Out, (double*)In, (double**)ghostOut, (double**)ghostIn, 
type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (double*)Out, (float*)In, (double**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (double*)Out, (short*)In, (double**)ghostOut, (short**)ghostIn, type); } } else if (out.Precision() == QUDA_SINGLE_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION) { copyGauge(out, in, location, (float*)Out, (double*)In, (float**)ghostOut, (double**)ghostIn, type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (float*)Out, (float*)In, (float**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (float*)Out, (short*)In, (float**)ghostOut, (short**)ghostIn, type); } } else if (out.Precision() == QUDA_HALF_PRECISION) { if (in.Precision() == QUDA_DOUBLE_PRECISION){ copyGauge(out, in, location, (short*)Out, (double*)In, (short**)ghostOut, (double**)ghostIn, type); } else if (in.Precision() == QUDA_SINGLE_PRECISION) { copyGauge(out, in, location, (short*)Out, (float*)In, (short**)ghostOut, (float**)ghostIn, type); } else if (in.Precision() == QUDA_HALF_PRECISION) { copyGauge(out, in, location, (short*)Out, (short*)In, (short**)ghostOut, (short**)ghostIn, type); } } } } // namespace quda
9414b0a892d386cded233c89e4f2128e219d3c1e.hip
// !!! This is a file automatically generated by hipify!!! /* * University of Illinois Open Source License * Copyright 2008-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include "config.h" #include "core/Types.h" #include "core/Exceptions.h" #include "rdme/CudaIntLattice.h" #include "rdme/Lattice.h" namespace lm { namespace rdme { CudaIntLattice::CudaIntLattice(lattice_coord_t size, si_dist_t latticeSpacing, uint particlesPerSite) :IntLattice(size,latticeSpacing,particlesPerSite),cudaParticlesCurrent(0),cudaParticlesSize(0),cudaSiteTypesSize(0),cudaSiteTypes(NULL),isGPUMemorySynched(false) { // Initialize the pointers. cudaParticles[0] = NULL; cudaParticles[1] = NULL; // Make sure the lattice dimensions are divisible by 32. if (size.x%32 != 0 || size.y%32 != 0 || size.z%32 != 0) throw InvalidArgException("size","each dimension of a CUDA lattice must be divisible by 32"); allocateCudaMemory(); } CudaIntLattice::CudaIntLattice(lattice_size_t xSize, lattice_size_t ySize, lattice_size_t zSize, si_dist_t latticeSpacing, uint particlesPerSite) :IntLattice(xSize,ySize,zSize,latticeSpacing,particlesPerSite),cudaParticlesCurrent(0),cudaParticlesSize(0),cudaSiteTypesSize(0),cudaSiteTypes(NULL),isGPUMemorySynched(false) { // Initialize the pointers. cudaParticles[0] = NULL; cudaParticles[1] = NULL; // Make sure the lattice dimensions are divisible by 32. if (size.x%32 != 0 || size.y%32 != 0 || size.z%32 != 0) throw InvalidArgException("size","each dimension of a CUDA lattice must be divisible by 32"); allocateCudaMemory(); } CudaIntLattice::~CudaIntLattice() { deallocateCudaMemory(); } void CudaIntLattice::allocateCudaMemory() { // Allocate memory on the CUDA device. cudaParticlesSize=numberSites*wordsPerSite*sizeof(uint32_t); CUDA_EXCEPTION_CHECK(hipMalloc(&cudaParticles[0], cudaParticlesSize)); //TODO: track memory usage. 
CUDA_EXCEPTION_CHECK(hipMalloc(&cudaParticles[1], cudaParticlesSize)); //TODO: track memory usage. cudaSiteTypesSize=numberSites*sizeof(uint8_t); CUDA_EXCEPTION_CHECK(hipMalloc(&cudaSiteTypes, cudaSiteTypesSize)); //TODO: track memory usage. } void CudaIntLattice::deallocateCudaMemory() { // If we have any allocated device memory, free it. if (cudaParticles[0] != NULL) { CUDA_EXCEPTION_CHECK(hipFree(cudaParticles[0])); //TODO: track memory usage. cudaParticles[0] = NULL; } if (cudaParticles[1] != NULL) { CUDA_EXCEPTION_CHECK(hipFree(cudaParticles[1])); //TODO: track memory usage. cudaParticles[1] = NULL; } cudaParticlesSize = 0; if (cudaSiteTypes != NULL) { CUDA_EXCEPTION_CHECK(hipFree(cudaSiteTypes)); //TODO: track memory usage. cudaSiteTypes = NULL; cudaSiteTypesSize = 0; } } void CudaIntLattice::copyToGPU() { if (!isGPUMemorySynched) { CUDA_EXCEPTION_CHECK(hipMemcpy(cudaParticles[cudaParticlesCurrent], particles, cudaParticlesSize, hipMemcpyHostToDevice)); CUDA_EXCEPTION_CHECK(hipMemcpy(cudaSiteTypes, siteTypes, cudaSiteTypesSize, hipMemcpyHostToDevice)); isGPUMemorySynched = true; } } void CudaIntLattice::copyFromGPU() { CUDA_EXCEPTION_CHECK(hipMemcpy(particles, cudaParticles[cudaParticlesCurrent], cudaParticlesSize, hipMemcpyDeviceToHost)); CUDA_EXCEPTION_CHECK(hipMemcpy(siteTypes, cudaSiteTypes, cudaSiteTypesSize, hipMemcpyDeviceToHost)); isGPUMemorySynched = true; } void * CudaIntLattice::getGPUMemorySrc() { return cudaParticles[cudaParticlesCurrent]; } void * CudaIntLattice::getGPUMemoryDest() { return cudaParticles[cudaParticlesCurrent==0?1:0]; } void CudaIntLattice::swapSrcDest() { cudaParticlesCurrent = cudaParticlesCurrent==0?1:0; } void * CudaIntLattice::getGPUMemorySiteTypes() { return cudaSiteTypes; } void CudaIntLattice::setSiteType(lattice_size_t x, lattice_size_t y, lattice_size_t z, site_t site) { IntLattice::setSiteType(x,y,z,site); isGPUMemorySynched = false; } void CudaIntLattice::addParticle(lattice_size_t x, lattice_size_t y, lattice_size_t z, particle_t particle) { IntLattice::addParticle(x,y,z,particle); isGPUMemorySynched = false; } void CudaIntLattice::removeParticles(lattice_size_t x,lattice_size_t y,lattice_size_t z) { IntLattice::removeParticles(x,y,z); isGPUMemorySynched = false; } void CudaIntLattice::setSiteType(lattice_size_t index, site_t site) { IntLattice::setSiteType(index,site); isGPUMemorySynched = false; } void CudaIntLattice::addParticle(lattice_size_t index, particle_t particle) { IntLattice::addParticle(index,particle); isGPUMemorySynched = false; } void CudaIntLattice::removeParticles(lattice_size_t index) { IntLattice::removeParticles(index); isGPUMemorySynched = false; } void CudaIntLattice::removeAllParticles() { IntLattice::removeAllParticles(); isGPUMemorySynched = false; } void CudaIntLattice::setFromRowMajorByteData(void * buffer, size_t bufferSize) { IntLattice::setFromRowMajorByteData(buffer, bufferSize); isGPUMemorySynched = false; } } }
9414b0a892d386cded233c89e4f2128e219d3c1e.cu
/* * University of Illinois Open Source License * Copyright 2008-2018 Luthey-Schulten Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, nor the names of its contributors may be used to endorse or * promote products derived from this Software without specific prior written * permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. * * Author(s): Elijah Roberts */ #include "config.h" #include "core/Types.h" #include "core/Exceptions.h" #include "rdme/CudaIntLattice.h" #include "rdme/Lattice.h" namespace lm { namespace rdme { CudaIntLattice::CudaIntLattice(lattice_coord_t size, si_dist_t latticeSpacing, uint particlesPerSite) :IntLattice(size,latticeSpacing,particlesPerSite),cudaParticlesCurrent(0),cudaParticlesSize(0),cudaSiteTypesSize(0),cudaSiteTypes(NULL),isGPUMemorySynched(false) { // Initialize the pointers. cudaParticles[0] = NULL; cudaParticles[1] = NULL; // Make sure the lattice dimensions are divisible by 32. if (size.x%32 != 0 || size.y%32 != 0 || size.z%32 != 0) throw InvalidArgException("size","each dimension of a CUDA lattice must be divisible by 32"); allocateCudaMemory(); } CudaIntLattice::CudaIntLattice(lattice_size_t xSize, lattice_size_t ySize, lattice_size_t zSize, si_dist_t latticeSpacing, uint particlesPerSite) :IntLattice(xSize,ySize,zSize,latticeSpacing,particlesPerSite),cudaParticlesCurrent(0),cudaParticlesSize(0),cudaSiteTypesSize(0),cudaSiteTypes(NULL),isGPUMemorySynched(false) { // Initialize the pointers. cudaParticles[0] = NULL; cudaParticles[1] = NULL; // Make sure the lattice dimensions are divisible by 32. if (size.x%32 != 0 || size.y%32 != 0 || size.z%32 != 0) throw InvalidArgException("size","each dimension of a CUDA lattice must be divisible by 32"); allocateCudaMemory(); } CudaIntLattice::~CudaIntLattice() { deallocateCudaMemory(); } void CudaIntLattice::allocateCudaMemory() { // Allocate memory on the CUDA device. cudaParticlesSize=numberSites*wordsPerSite*sizeof(uint32_t); CUDA_EXCEPTION_CHECK(cudaMalloc(&cudaParticles[0], cudaParticlesSize)); //TODO: track memory usage. 
CUDA_EXCEPTION_CHECK(cudaMalloc(&cudaParticles[1], cudaParticlesSize)); //TODO: track memory usage. cudaSiteTypesSize=numberSites*sizeof(uint8_t); CUDA_EXCEPTION_CHECK(cudaMalloc(&cudaSiteTypes, cudaSiteTypesSize)); //TODO: track memory usage. } void CudaIntLattice::deallocateCudaMemory() { // If we have any allocated device memory, free it. if (cudaParticles[0] != NULL) { CUDA_EXCEPTION_CHECK(cudaFree(cudaParticles[0])); //TODO: track memory usage. cudaParticles[0] = NULL; } if (cudaParticles[1] != NULL) { CUDA_EXCEPTION_CHECK(cudaFree(cudaParticles[1])); //TODO: track memory usage. cudaParticles[1] = NULL; } cudaParticlesSize = 0; if (cudaSiteTypes != NULL) { CUDA_EXCEPTION_CHECK(cudaFree(cudaSiteTypes)); //TODO: track memory usage. cudaSiteTypes = NULL; cudaSiteTypesSize = 0; } } void CudaIntLattice::copyToGPU() { if (!isGPUMemorySynched) { CUDA_EXCEPTION_CHECK(cudaMemcpy(cudaParticles[cudaParticlesCurrent], particles, cudaParticlesSize, cudaMemcpyHostToDevice)); CUDA_EXCEPTION_CHECK(cudaMemcpy(cudaSiteTypes, siteTypes, cudaSiteTypesSize, cudaMemcpyHostToDevice)); isGPUMemorySynched = true; } } void CudaIntLattice::copyFromGPU() { CUDA_EXCEPTION_CHECK(cudaMemcpy(particles, cudaParticles[cudaParticlesCurrent], cudaParticlesSize, cudaMemcpyDeviceToHost)); CUDA_EXCEPTION_CHECK(cudaMemcpy(siteTypes, cudaSiteTypes, cudaSiteTypesSize, cudaMemcpyDeviceToHost)); isGPUMemorySynched = true; } void * CudaIntLattice::getGPUMemorySrc() { return cudaParticles[cudaParticlesCurrent]; } void * CudaIntLattice::getGPUMemoryDest() { return cudaParticles[cudaParticlesCurrent==0?1:0]; } void CudaIntLattice::swapSrcDest() { cudaParticlesCurrent = cudaParticlesCurrent==0?1:0; } void * CudaIntLattice::getGPUMemorySiteTypes() { return cudaSiteTypes; } void CudaIntLattice::setSiteType(lattice_size_t x, lattice_size_t y, lattice_size_t z, site_t site) { IntLattice::setSiteType(x,y,z,site); isGPUMemorySynched = false; } void CudaIntLattice::addParticle(lattice_size_t x, lattice_size_t y, lattice_size_t z, particle_t particle) { IntLattice::addParticle(x,y,z,particle); isGPUMemorySynched = false; } void CudaIntLattice::removeParticles(lattice_size_t x,lattice_size_t y,lattice_size_t z) { IntLattice::removeParticles(x,y,z); isGPUMemorySynched = false; } void CudaIntLattice::setSiteType(lattice_size_t index, site_t site) { IntLattice::setSiteType(index,site); isGPUMemorySynched = false; } void CudaIntLattice::addParticle(lattice_size_t index, particle_t particle) { IntLattice::addParticle(index,particle); isGPUMemorySynched = false; } void CudaIntLattice::removeParticles(lattice_size_t index) { IntLattice::removeParticles(index); isGPUMemorySynched = false; } void CudaIntLattice::removeAllParticles() { IntLattice::removeAllParticles(); isGPUMemorySynched = false; } void CudaIntLattice::setFromRowMajorByteData(void * buffer, size_t bufferSize) { IntLattice::setFromRowMajorByteData(buffer, bufferSize); isGPUMemorySynched = false; } } }
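Both versions of CudaIntLattice above rely on two ideas: a dirty flag (isGPUMemorySynched) that every host-side mutation clears so copyToGPU only transfers when needed, and a pair of device particle buffers whose source/destination roles are swapped between propagation steps via getGPUMemorySrc(), getGPUMemoryDest() and swapSrcDest(). The following is a minimal, self-contained sketch of that double-buffering pattern only; the struct and its names are illustrative and are not part of the original sources.

// Illustrative sketch -- not part of CudaIntLattice itself.
// Mirrors the getGPUMemorySrc()/getGPUMemoryDest()/swapSrcDest() pattern.
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>

struct DeviceDoubleBuffer {
    void*  buf[2] = {nullptr, nullptr};
    int    current = 0;   // index of the buffer holding the current lattice state
    size_t bytes   = 0;

    void allocate(size_t n) {
        bytes = n;
        if (hipMalloc(&buf[0], bytes) != hipSuccess ||
            hipMalloc(&buf[1], bytes) != hipSuccess) {
            fprintf(stderr, "device allocation failed\n");
            exit(1);
        }
    }
    void* src()  const { return buf[current]; }                  // read side of a step
    void* dest() const { return buf[current == 0 ? 1 : 0]; }     // write side of a step
    void  swap()       { current = (current == 0) ? 1 : 0; }     // call after each step
    void  release() {
        if (buf[0]) hipFree(buf[0]);
        if (buf[1]) hipFree(buf[1]);
        buf[0] = buf[1] = nullptr;
        bytes = 0;
    }
};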
9656a54949b54937cd2eab685592e269f00a57ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "upsampling.h" // Textures texture<float, 2, hipReadModeElementType> gray_img; texture<float, 2, hipReadModeElementType> imgToFilter; // Calculate weight __global__ void CalcWeightKernel(float *input, float* weight, float lambda_tgvl2, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; if (input[pos] > 0) { weight[pos] = 1.0f; } else { weight[pos] = 0.0f; } weight[pos] = weight[pos] * lambda_tgvl2; } } void lup::Upsampling::CalcWeight(float *input, float *weight, float lambda_tgvl2) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CalcWeightKernel << < blocks, threads >> > (input, weight, lambda_tgvl2, width, height, stride); } // Calculate anisotropic diffusion tensor __global__ void CalcTensorKernel(float* gray, float beta, float gamma, int size_grad, float* atensor, float* btensor, float* ctensor, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float2 grad; float t0; // x derivative /*t0 = tex2D(gray_img, x - 2.0f * dx, y); t0 -= tex2D(gray_img, x - 1.0f * dx, y) * 8.0f; t0 += tex2D(gray_img, x + 1.0f * dx, y) * 8.0f; t0 -= tex2D(gray_img, x + 2.0f * dx, y); t0 /= 12.0f;*/ t0 = tex2D(gray_img, x + 1.0f * dx, y); t0 -= tex2D(gray_img, x, y); t0 = tex2D(gray_img, x + 1.0f * dx, y + 1.0f * dy); t0 -= tex2D(gray_img, x, y + 1.0f * dy); grad.x = t0; // y derivative /*t0 = tex2D(gray_img, x, y - 2.0f * dy); t0 -= tex2D(gray_img, x, y - 1.0f * dy) * 8.0f; t0 += tex2D(gray_img, x, y + 1.0f * dy) * 8.0f; t0 -= tex2D(gray_img, x, y + 2.0f * dy); t0 /= 12.0f;*/ t0 = tex2D(gray_img, x, y + 1.0f * dy); t0 -= tex2D(gray_img, x, y); t0 = tex2D(gray_img, x + 1.0f * dx, y + 1.0f * dy); t0 -= tex2D(gray_img, x + 1.0f * dx, y); grad.y = t0; float min_n_length = 1e-8f; float min_tensor_val = 1e-8f; float abs_img = sqrtf(grad.x*grad.x + grad.y*grad.y); float norm_n = abs_img; float2 n_normed; n_normed.x = grad.x / norm_n; n_normed.y = grad.y / norm_n; if (norm_n < min_n_length) { n_normed.x = 1.0f; n_normed.y = 0.0f; } float2 nT_normed; nT_normed.x = n_normed.y; nT_normed.y = -n_normed.x; float wtensor; if (expf(-beta * powf(abs_img, gamma)) > min_tensor_val) { wtensor = expf(-beta * powf(abs_img, gamma)); } else wtensor = min_tensor_val; float a = wtensor * n_normed.x * n_normed.x + nT_normed.x * nT_normed.x; float c = wtensor * n_normed.x * n_normed.y + nT_normed.x * nT_normed.y; float b = wtensor * n_normed.y * n_normed.y + nT_normed.y * nT_normed.y; atensor[pos] = a; btensor[pos] = b; ctensor[pos] = c; } } void lup::Upsampling::CalcTensor(float* gray, float beta, float gamma, int size_grad, float* a, float* b, float* c) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); // mirror if a coordinate value is out-of-range gray_img.addressMode[0] = hipAddressModeMirror; gray_img.addressMode[1] = hipAddressModeMirror; gray_img.filterMode = hipFilterModeLinear; gray_img.normalized = true; hipChannelFormatDesc desc = 
hipCreateChannelDesc<float>(); hipBindTexture2D(0, gray_img, gray, width, height, stride * sizeof(float)); CalcTensorKernel << < blocks, threads >> > (gray, beta, gamma, size_grad, a, b, c, width, height, stride); } // Gaussian Filter // Calculate anisotropic diffusion tensor __global__ void GaussianKernel(float* input, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float2 grad; float t0 = (1 / 4.0f)*tex2D(imgToFilter, x, y); t0 += (1 / 16.0f)*tex2D(imgToFilter, x - 1.0f * dx, y - 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x - 1.0f * dx, y + 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x + 1.0f * dx, y - 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x + 1.0f * dx, y + 1.0f * dy); t0 += (1 / 8.0f)*tex2D(imgToFilter, x - 1.0f * dx, y); t0 += (1 / 8.0f)*tex2D(imgToFilter, x + 1.0f * dx, y); t0 += (1 / 8.0f)*tex2D(imgToFilter, x, y - 1.0f * dy); t0 += (1 / 8.0f)*tex2D(imgToFilter, x, y + 1.0f * dy); output[pos] = t0; } } void lup::Upsampling::Gaussian(float* input, float* output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); // mirror if a coordinate value is out-of-range imgToFilter.addressMode[0] = hipAddressModeMirror; imgToFilter.addressMode[1] = hipAddressModeMirror; imgToFilter.filterMode = hipFilterModeLinear; imgToFilter.normalized = true; hipChannelFormatDesc desc = hipCreateChannelDesc<float>(); hipBindTexture2D(0, imgToFilter, input, width, height, stride * sizeof(float)); GaussianKernel << < blocks, threads >> > (input, output, width, height, stride); } // Solve eta_u, eta_v __global__ void SolveEtaKernel(float* weights, float alpha0, float alpha1, float* atensor, float *btensor, float* ctensor, float* etau, float* etav1, float* etav2, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float a = atensor[pos]; float b = btensor[pos]; float c = ctensor[pos]; etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1) + 0 * weights[pos] * weights[pos]; etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0; etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0; } } void lup::Upsampling::SolveEta(float* weights, float alpha0, float alpha1, float* a, float *b, float* c, float* etau, float* etav1, float* etav2) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); SolveEtaKernel << < blocks, threads >> > (weights, alpha0, alpha1, a, b, c, etau, etav1, etav2, width, height, stride); } // Multiply two matrices __global__ void MultKernel(float* input0, float*input1, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input0[pos] * input1[pos]; } } __global__ void MultKernel(float* input0, float scale, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + 
threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input0[pos] * scale; } } void lup::Upsampling::Mult(float* input0, float* input1, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); MultKernel << < blocks, threads >> > (input0, input1, output, width, height, stride); } void lup::Upsampling::Mult(float* input0, float scale, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); MultKernel << < blocks, threads >> > (input0, scale, output, width, height, stride); } __global__ void NormalizeKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = (input[pos] - min) / (max - min); } } void lup::Upsampling::Normalize(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); NormalizeKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } __global__ void NormalizeClipKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = (input[pos] - min) / (max - min); if (output[pos] < 0.0f) { output[pos] = 0.0f; } if (output[pos] > 1.0f) { output[pos] = 1.0f; } } } void lup::Upsampling::NormalizeClip(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); NormalizeClipKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } __global__ void DenormalizeClipKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input[pos] * (max - min) + min; } } void lup::Upsampling::DenormalizeClip(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); DenormalizeClipKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } // Update Dual Variables (p, q) __global__ void UpdateDualVariablesTGVKernel(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float*c, float4* grad_v, float2* p, float4* q, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //u_x = dxp(u_) - v_(:, : , 1); float u_x, u_y; if ((ix + 1) < width) u_x = u_[right] - u_[pos] - v_[pos].x; else u_x = u_[pos] - u_[left] - 
v_[pos].x; //u_y = dyp(u_) - v_(:, : , 2); if ((iy + 1) < height) u_y = u_[down] - u_[pos] - v_[pos].y; else u_y = u_[pos] - u_[up] - v_[pos].y; //du_tensor_x = a.*u_x + c.*u_y; float du_tensor_x = a[pos] * u_x + c[pos] * u_y; //du_tensor_y = c.*u_x + b.*u_y; float du_tensor_y = c[pos] * u_x + b[pos] * u_y; //p(:, : , 1) = p(:, : , 1) + alpha1*sigma / eta_p.*du_tensor_x; p[pos].x = p[pos].x + (alpha1*sigma / eta_p) * du_tensor_x; //p(:, : , 2) = p(:, : , 2) + alpha1*sigma / eta_p.*du_tensor_y; p[pos].y = p[pos].y + (alpha1*sigma / eta_p) * du_tensor_y; //projection //reprojection = max(1.0, sqrt(p(:, : , 1). ^ 2 + p(:, : , 2). ^ 2)); float reprojection = sqrtf(p[pos].x * p[pos].x + p[pos].y * p[pos].y); if (reprojection < 1.0f) { reprojection = 1.0f; } //p(:, : , 1) = p(:, : , 1). / reprojection; p[pos].x = p[pos].x / reprojection; //p(:, : , 2) = p(:, : , 2). / reprojection; p[pos].y = p[pos].y / reprojection; //grad_v(:, : , 1) = dxp(v_(:, : , 1)); if ((ix + 1) < width) grad_v[pos].x = v_[right].x - v_[pos].x; else grad_v[pos].x = v_[pos].x - v_[left].x; //grad_v(:, : , 2) = dyp(v_(:, : , 2)); if ((iy + 1) < height) grad_v[pos].y = v_[down].y - v_[pos].y; else grad_v[pos].y = v_[pos].y - v_[up].y; //grad_v(:, : , 3) = dyp(v_(:, : , 1)); if ((iy + 1) < height) grad_v[pos].z = v_[down].x - v_[pos].x; else grad_v[pos].z = v_[pos].x - v_[up].x; //grad_v(:, : , 4) = dxp(v_(:, : , 2)); if ((ix + 1) < width) grad_v[pos].w = v_[right].y - v_[pos].y; else grad_v[pos].w = v_[pos].y - v_[left].y; //q = q + alpha0*sigma / eta_q.*grad_v; float ase = alpha0 * sigma / eta_q; q[pos].x = q[pos].x + ase * grad_v[pos].x; q[pos].y = q[pos].y + ase * grad_v[pos].y; q[pos].z = q[pos].z + ase * grad_v[pos].z; q[pos].w = q[pos].w + ase * grad_v[pos].w; //reproject = max(1.0, sqrt(q(:, : , 1). ^ 2 + q(:, : , 2). ^ 2 + q(:, : , 3). ^ 2 + q(:, : , 4). ^ 2)); float reproject = sqrtf(q[pos].x * q[pos].x + q[pos].y * q[pos].y + q[pos].z * q[pos].z + q[pos].w * q[pos].w); if (reproject < 1.0f) { reproject = 1.0f; } //q(:, : , 1) = q(:, : , 1). / reproject; q[pos].x = q[pos].x / reproject; //q(:, : , 2) = q(:, : , 2). / reproject; q[pos].y = q[pos].y / reproject; //q(:, : , 3) = q(:, : , 3). / reproject; q[pos].z = q[pos].z / reproject; //q(:, : , 4) = q(:, : , 4). 
/ reproject; q[pos].w = q[pos].w / reproject; } } void lup::Upsampling::UpdateDualVariablesTGV(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float* c, float4* grad_v, float2* p, float4* q) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); UpdateDualVariablesTGVKernel << < blocks, threads >> > (u_, v_, alpha0, alpha1, sigma, eta_p, eta_q, a, b, c, grad_v, p, q, width, height, stride); } // Solve Tp __global__ void SolveTpKernel(float*a, float *b, float*c, float2* p, float2* Tp, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; Tp[pos].x = a[pos] * p[pos].x + c[pos] * p[pos].y; Tp[pos].y = c[pos] * p[pos].x + b[pos] * p[pos].y; } } void lup::Upsampling::SolveTp(float* a, float* b, float* c, float2* p, float2* Tp) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); SolveTpKernel << < blocks, threads >> > (a, b, c, p, Tp, width, height, stride); } // Clone __global__ void CloneKernel(float* dst, float* src, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; dst[pos] = src[pos]; } } __global__ void CloneKernel2(float2* dst, float2* src, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; dst[pos] = src[pos]; } } void lup::Upsampling::Clone(float* dst, float* src) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CloneKernel << < blocks, threads >> > (dst, src, width, height, stride); } void lup::Upsampling::Clone(float2* dst, float2* src) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CloneKernel2 << < blocks, threads >> > (dst, src, width, height, stride); } // Update Primal variables L2 (u, v) __global__ void UpdatePrimalVariablesL2Kernel(float2* Tp, float* u_, float2* v_, float2* p, float4* q, float* a, float* b, float* c, float tau, float* eta_u, float* eta_v1, float* eta_v2, float alpha0, float alpha1, float* w, float* dw, float mu, float* u, float2* v, float* u_s, float2* v_s, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //div_p = dxm(Tp(:, : , 1)) + dym(Tp(:, : , 2)); float div_p; float dxmTp, dymTp; if ((ix - 1) >= 0) dxmTp = Tp[pos].x - Tp[left].x; else if (ix == width - 1) dxmTp = -Tp[left].x; else dxmTp = Tp[pos].x; if ((iy - 1) >= 0) dymTp = Tp[pos].y - Tp[up].y; else if (iy == height - 1) dymTp = -Tp[up].y; else dymTp = Tp[pos].y; div_p = dxmTp + dymTp; //tau_eta_u = tau. / eta_u; float tau_eta_u = tau / eta_u[pos]; //u = (u_ + tau_eta_u.*(alpha1.*div_p + dw)). 
/ (1 + tau_eta_u.*w); u[pos] = (u_[pos] + tau_eta_u * (alpha1 * div_p + dw[pos])) / (1 + tau_eta_u * w[pos]); //qc(:, : , 1) = [q(:, 1 : end - 1, 1), zeros(M, 1)]; //qc(:, : , 2) = [q(1:end - 1, : , 2); zeros(1, N)]; //qc(:, : , 3) = [q(1:end - 1, : , 3); zeros(1, N)]; //qc(:, : , 4) = [q(:, 1 : end - 1, 4), zeros(M, 1)]; float4 qc; if (ix == width - 1) { qc.x = 0.0f; qc.w = 0.0f; } else { qc.x = q[pos].x; qc.w = q[pos].w; } if (iy == height - 1) { qc.y = 0.0f; qc.z = 0.0f; } else { qc.y = q[pos].y; qc.z = q[pos].z; } //qw_x = [zeros(M, 1, 1), q(:, 1 : end - 1, 1)]; //qw_w = [zeros(M, 1, 1), q(:, 1 : end - 1, 4)]; float qw_x, qw_w; if ((ix - 1) >= 0) { qw_x = q[left].x; qw_w = q[left].w; } else { qw_x = 0.0f; qw_w = 0.0f; } //qn_y = [zeros(1, N, 1); q(1:end - 1, : , 2)]; //qn_z = [zeros(1, N, 1); q(1:end - 1, : , 3)]; float qn_y, qn_z; if ((iy - 1) >= 0) { qn_y = q[up].y; qn_z = q[up].z; } else { qn_y = 0.0f; qn_z = 0.0f; } //div_q(:, : , 1) = (qc(:, : , 1) - qw_x) + (qc(:, : , 3) - qn_z); //div_q(:, : , 2) = (qc(:, : , 4) - qw_w) + (qc(:, : , 2) - qn_y); float2 div_q; div_q.x = (qc.x - qw_x) + (qc.z - qn_z); div_q.y = (qc.w - qw_w) + (qc.y - qn_y); //dq_tensor(:, : , 1) = a.*p(:, : , 1) + c.*p(:, : , 2); //dq_tensor(:, : , 2) = c.*p(:, : , 1) + b.*p(:, : , 2); float2 dq_tensor; dq_tensor.x = a[pos] * p[pos].x + c[pos] * p[pos].y; dq_tensor.y = c[pos] * p[pos].x + b[pos] * p[pos].y; //v = v_ + tau. / eta_v.*(alpha1.*dq_tensor + alpha0.*div_q); v[pos].x = v_[pos].x + (tau / eta_v1[pos]) * (alpha1 * dq_tensor.x + alpha0 * div_q.x); v[pos].y = v_[pos].y + (tau / eta_v2[pos]) * (alpha1 * dq_tensor.y + alpha0 * div_q.y); // over - relaxation //u_ = u + mu.*(u - u_); //v_ = v + mu.*(v - v_); u_s[pos] = u[pos] + mu * (u[pos] - u_[pos]); v_s[pos].x = v[pos].x + mu * (v[pos].x - v_[pos].x); v_s[pos].y = v[pos].y + mu * (v[pos].y - v_[pos].y); } } void lup::Upsampling::UpdatePrimalVariablesL2(float2* Tp, float* u_, float2* v_, float2* p, float4* q, float* a, float* b, float* c, float tau, float* eta_u, float* eta_v1, float* eta_v2, float alpha0, float alpha1, float* w, float* dw, float mu, float* u, float2* v, float* u_s, float2* v_s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); UpdatePrimalVariablesL2Kernel << < blocks, threads >> > (Tp, u_, v_, p, q, a, b, c, tau, eta_u, eta_v1, eta_v2, alpha0, alpha1, w, dw, mu, u, v, u_s, v_s, width, height, stride); } //void lup::Upsampling::UpsamplingTensorTVGL2(int w, int h, int s, float* u_init, float* depth, float* weight, // float* gray, float beta, float gamma, float tgv_alpha, float lambda, int maxits) //{ // //}
9656a54949b54937cd2eab685592e269f00a57ba.cu
#include "upsampling.h" // Textures texture<float, 2, cudaReadModeElementType> gray_img; texture<float, 2, cudaReadModeElementType> imgToFilter; // Calculate weight __global__ void CalcWeightKernel(float *input, float* weight, float lambda_tgvl2, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; if (input[pos] > 0) { weight[pos] = 1.0f; } else { weight[pos] = 0.0f; } weight[pos] = weight[pos] * lambda_tgvl2; } } void lup::Upsampling::CalcWeight(float *input, float *weight, float lambda_tgvl2) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CalcWeightKernel << < blocks, threads >> > (input, weight, lambda_tgvl2, width, height, stride); } // Calculate anisotropic diffusion tensor __global__ void CalcTensorKernel(float* gray, float beta, float gamma, int size_grad, float* atensor, float* btensor, float* ctensor, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float2 grad; float t0; // x derivative /*t0 = tex2D(gray_img, x - 2.0f * dx, y); t0 -= tex2D(gray_img, x - 1.0f * dx, y) * 8.0f; t0 += tex2D(gray_img, x + 1.0f * dx, y) * 8.0f; t0 -= tex2D(gray_img, x + 2.0f * dx, y); t0 /= 12.0f;*/ t0 = tex2D(gray_img, x + 1.0f * dx, y); t0 -= tex2D(gray_img, x, y); t0 = tex2D(gray_img, x + 1.0f * dx, y + 1.0f * dy); t0 -= tex2D(gray_img, x, y + 1.0f * dy); grad.x = t0; // y derivative /*t0 = tex2D(gray_img, x, y - 2.0f * dy); t0 -= tex2D(gray_img, x, y - 1.0f * dy) * 8.0f; t0 += tex2D(gray_img, x, y + 1.0f * dy) * 8.0f; t0 -= tex2D(gray_img, x, y + 2.0f * dy); t0 /= 12.0f;*/ t0 = tex2D(gray_img, x, y + 1.0f * dy); t0 -= tex2D(gray_img, x, y); t0 = tex2D(gray_img, x + 1.0f * dx, y + 1.0f * dy); t0 -= tex2D(gray_img, x + 1.0f * dx, y); grad.y = t0; float min_n_length = 1e-8f; float min_tensor_val = 1e-8f; float abs_img = sqrtf(grad.x*grad.x + grad.y*grad.y); float norm_n = abs_img; float2 n_normed; n_normed.x = grad.x / norm_n; n_normed.y = grad.y / norm_n; if (norm_n < min_n_length) { n_normed.x = 1.0f; n_normed.y = 0.0f; } float2 nT_normed; nT_normed.x = n_normed.y; nT_normed.y = -n_normed.x; float wtensor; if (expf(-beta * powf(abs_img, gamma)) > min_tensor_val) { wtensor = expf(-beta * powf(abs_img, gamma)); } else wtensor = min_tensor_val; float a = wtensor * n_normed.x * n_normed.x + nT_normed.x * nT_normed.x; float c = wtensor * n_normed.x * n_normed.y + nT_normed.x * nT_normed.y; float b = wtensor * n_normed.y * n_normed.y + nT_normed.y * nT_normed.y; atensor[pos] = a; btensor[pos] = b; ctensor[pos] = c; } } void lup::Upsampling::CalcTensor(float* gray, float beta, float gamma, int size_grad, float* a, float* b, float* c) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); // mirror if a coordinate value is out-of-range gray_img.addressMode[0] = cudaAddressModeMirror; gray_img.addressMode[1] = cudaAddressModeMirror; gray_img.filterMode = cudaFilterModeLinear; gray_img.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, gray_img, gray, width, 
height, stride * sizeof(float)); CalcTensorKernel << < blocks, threads >> > (gray, beta, gamma, size_grad, a, b, c, width, height, stride); } // Gaussian Filter // Calculate anisotropic diffusion tensor __global__ void GaussianKernel(float* input, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float dx = 1.0f / (float)width; float dy = 1.0f / (float)height; float x = ((float)ix + 0.5f) * dx; float y = ((float)iy + 0.5f) * dy; float2 grad; float t0 = (1 / 4.0f)*tex2D(imgToFilter, x, y); t0 += (1 / 16.0f)*tex2D(imgToFilter, x - 1.0f * dx, y - 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x - 1.0f * dx, y + 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x + 1.0f * dx, y - 1.0f * dy); t0 += (1 / 16.0f)*tex2D(imgToFilter, x + 1.0f * dx, y + 1.0f * dy); t0 += (1 / 8.0f)*tex2D(imgToFilter, x - 1.0f * dx, y); t0 += (1 / 8.0f)*tex2D(imgToFilter, x + 1.0f * dx, y); t0 += (1 / 8.0f)*tex2D(imgToFilter, x, y - 1.0f * dy); t0 += (1 / 8.0f)*tex2D(imgToFilter, x, y + 1.0f * dy); output[pos] = t0; } } void lup::Upsampling::Gaussian(float* input, float* output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); // mirror if a coordinate value is out-of-range imgToFilter.addressMode[0] = cudaAddressModeMirror; imgToFilter.addressMode[1] = cudaAddressModeMirror; imgToFilter.filterMode = cudaFilterModeLinear; imgToFilter.normalized = true; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>(); cudaBindTexture2D(0, imgToFilter, input, width, height, stride * sizeof(float)); GaussianKernel << < blocks, threads >> > (input, output, width, height, stride); } // Solve eta_u, eta_v __global__ void SolveEtaKernel(float* weights, float alpha0, float alpha1, float* atensor, float *btensor, float* ctensor, float* etau, float* etav1, float* etav2, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; float a = atensor[pos]; float b = btensor[pos]; float c = ctensor[pos]; etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1) + 0 * weights[pos] * weights[pos]; etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0; etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0; } } void lup::Upsampling::SolveEta(float* weights, float alpha0, float alpha1, float* a, float *b, float* c, float* etau, float* etav1, float* etav2) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); SolveEtaKernel << < blocks, threads >> > (weights, alpha0, alpha1, a, b, c, etau, etav1, etav2, width, height, stride); } // Multiply two matrices __global__ void MultKernel(float* input0, float*input1, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input0[pos] * input1[pos]; } } __global__ void MultKernel(float* input0, float scale, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // 
current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input0[pos] * scale; } } void lup::Upsampling::Mult(float* input0, float* input1, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); MultKernel << < blocks, threads >> > (input0, input1, output, width, height, stride); } void lup::Upsampling::Mult(float* input0, float scale, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); MultKernel << < blocks, threads >> > (input0, scale, output, width, height, stride); } __global__ void NormalizeKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = (input[pos] - min) / (max - min); } } void lup::Upsampling::Normalize(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); NormalizeKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } __global__ void NormalizeClipKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = (input[pos] - min) / (max - min); if (output[pos] < 0.0f) { output[pos] = 0.0f; } if (output[pos] > 1.0f) { output[pos] = 1.0f; } } } void lup::Upsampling::NormalizeClip(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); NormalizeClipKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } __global__ void DenormalizeClipKernel(float* input, float min, float max, float* output, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; output[pos] = input[pos] * (max - min) + min; } } void lup::Upsampling::DenormalizeClip(float* input, float min, float max, float *output) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); DenormalizeClipKernel << < blocks, threads >> > (input, min, max, output, width, height, stride); } // Update Dual Variables (p, q) __global__ void UpdateDualVariablesTGVKernel(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float*c, float4* grad_v, float2* p, float4* q, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //u_x = dxp(u_) - v_(:, : , 1); float u_x, u_y; if ((ix + 1) < width) u_x = u_[right] - u_[pos] - v_[pos].x; else u_x = u_[pos] - u_[left] - v_[pos].x; //u_y = dyp(u_) - v_(:, : , 2); if ((iy + 1) < height) u_y = u_[down] - 
u_[pos] - v_[pos].y; else u_y = u_[pos] - u_[up] - v_[pos].y; //du_tensor_x = a.*u_x + c.*u_y; float du_tensor_x = a[pos] * u_x + c[pos] * u_y; //du_tensor_y = c.*u_x + b.*u_y; float du_tensor_y = c[pos] * u_x + b[pos] * u_y; //p(:, : , 1) = p(:, : , 1) + alpha1*sigma / eta_p.*du_tensor_x; p[pos].x = p[pos].x + (alpha1*sigma / eta_p) * du_tensor_x; //p(:, : , 2) = p(:, : , 2) + alpha1*sigma / eta_p.*du_tensor_y; p[pos].y = p[pos].y + (alpha1*sigma / eta_p) * du_tensor_y; //projection //reprojection = max(1.0, sqrt(p(:, : , 1). ^ 2 + p(:, : , 2). ^ 2)); float reprojection = sqrtf(p[pos].x * p[pos].x + p[pos].y * p[pos].y); if (reprojection < 1.0f) { reprojection = 1.0f; } //p(:, : , 1) = p(:, : , 1). / reprojection; p[pos].x = p[pos].x / reprojection; //p(:, : , 2) = p(:, : , 2). / reprojection; p[pos].y = p[pos].y / reprojection; //grad_v(:, : , 1) = dxp(v_(:, : , 1)); if ((ix + 1) < width) grad_v[pos].x = v_[right].x - v_[pos].x; else grad_v[pos].x = v_[pos].x - v_[left].x; //grad_v(:, : , 2) = dyp(v_(:, : , 2)); if ((iy + 1) < height) grad_v[pos].y = v_[down].y - v_[pos].y; else grad_v[pos].y = v_[pos].y - v_[up].y; //grad_v(:, : , 3) = dyp(v_(:, : , 1)); if ((iy + 1) < height) grad_v[pos].z = v_[down].x - v_[pos].x; else grad_v[pos].z = v_[pos].x - v_[up].x; //grad_v(:, : , 4) = dxp(v_(:, : , 2)); if ((ix + 1) < width) grad_v[pos].w = v_[right].y - v_[pos].y; else grad_v[pos].w = v_[pos].y - v_[left].y; //q = q + alpha0*sigma / eta_q.*grad_v; float ase = alpha0 * sigma / eta_q; q[pos].x = q[pos].x + ase * grad_v[pos].x; q[pos].y = q[pos].y + ase * grad_v[pos].y; q[pos].z = q[pos].z + ase * grad_v[pos].z; q[pos].w = q[pos].w + ase * grad_v[pos].w; //reproject = max(1.0, sqrt(q(:, : , 1). ^ 2 + q(:, : , 2). ^ 2 + q(:, : , 3). ^ 2 + q(:, : , 4). ^ 2)); float reproject = sqrtf(q[pos].x * q[pos].x + q[pos].y * q[pos].y + q[pos].z * q[pos].z + q[pos].w * q[pos].w); if (reproject < 1.0f) { reproject = 1.0f; } //q(:, : , 1) = q(:, : , 1). / reproject; q[pos].x = q[pos].x / reproject; //q(:, : , 2) = q(:, : , 2). / reproject; q[pos].y = q[pos].y / reproject; //q(:, : , 3) = q(:, : , 3). / reproject; q[pos].z = q[pos].z / reproject; //q(:, : , 4) = q(:, : , 4). 
/ reproject; q[pos].w = q[pos].w / reproject; } } void lup::Upsampling::UpdateDualVariablesTGV(float* u_, float2 *v_, float alpha0, float alpha1, float sigma, float eta_p, float eta_q, float* a, float* b, float* c, float4* grad_v, float2* p, float4* q) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); UpdateDualVariablesTGVKernel << < blocks, threads >> > (u_, v_, alpha0, alpha1, sigma, eta_p, eta_q, a, b, c, grad_v, p, q, width, height, stride); } // Solve Tp __global__ void SolveTpKernel(float*a, float *b, float*c, float2* p, float2* Tp, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; Tp[pos].x = a[pos] * p[pos].x + c[pos] * p[pos].y; Tp[pos].y = c[pos] * p[pos].x + b[pos] * p[pos].y; } } void lup::Upsampling::SolveTp(float* a, float* b, float* c, float2* p, float2* Tp) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); SolveTpKernel << < blocks, threads >> > (a, b, c, p, Tp, width, height, stride); } // Clone __global__ void CloneKernel(float* dst, float* src, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; dst[pos] = src[pos]; } } __global__ void CloneKernel2(float2* dst, float2* src, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; dst[pos] = src[pos]; } } void lup::Upsampling::Clone(float* dst, float* src) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CloneKernel << < blocks, threads >> > (dst, src, width, height, stride); } void lup::Upsampling::Clone(float2* dst, float2* src) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); CloneKernel2 << < blocks, threads >> > (dst, src, width, height, stride); } // Update Primal variables L2 (u, v) __global__ void UpdatePrimalVariablesL2Kernel(float2* Tp, float* u_, float2* v_, float2* p, float4* q, float* a, float* b, float* c, float tau, float* eta_u, float* eta_v1, float* eta_v2, float alpha0, float alpha1, float* w, float* dw, float mu, float* u, float2* v, float* u_s, float2* v_s, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; int right = (ix + 1) + iy * stride; int down = ix + (iy + 1) * stride; int left = (ix - 1) + iy * stride; int up = ix + (iy - 1) * stride; //div_p = dxm(Tp(:, : , 1)) + dym(Tp(:, : , 2)); float div_p; float dxmTp, dymTp; if ((ix - 1) >= 0) dxmTp = Tp[pos].x - Tp[left].x; else if (ix == width - 1) dxmTp = -Tp[left].x; else dxmTp = Tp[pos].x; if ((iy - 1) >= 0) dymTp = Tp[pos].y - Tp[up].y; else if (iy == height - 1) dymTp = -Tp[up].y; else dymTp = Tp[pos].y; div_p = dxmTp + dymTp; //tau_eta_u = tau. / eta_u; float tau_eta_u = tau / eta_u[pos]; //u = (u_ + tau_eta_u.*(alpha1.*div_p + dw)). 
/ (1 + tau_eta_u.*w); u[pos] = (u_[pos] + tau_eta_u * (alpha1 * div_p + dw[pos])) / (1 + tau_eta_u * w[pos]); //qc(:, : , 1) = [q(:, 1 : end - 1, 1), zeros(M, 1)]; //qc(:, : , 2) = [q(1:end - 1, : , 2); zeros(1, N)]; //qc(:, : , 3) = [q(1:end - 1, : , 3); zeros(1, N)]; //qc(:, : , 4) = [q(:, 1 : end - 1, 4), zeros(M, 1)]; float4 qc; if (ix == width - 1) { qc.x = 0.0f; qc.w = 0.0f; } else { qc.x = q[pos].x; qc.w = q[pos].w; } if (iy == height - 1) { qc.y = 0.0f; qc.z = 0.0f; } else { qc.y = q[pos].y; qc.z = q[pos].z; } //qw_x = [zeros(M, 1, 1), q(:, 1 : end - 1, 1)]; //qw_w = [zeros(M, 1, 1), q(:, 1 : end - 1, 4)]; float qw_x, qw_w; if ((ix - 1) >= 0) { qw_x = q[left].x; qw_w = q[left].w; } else { qw_x = 0.0f; qw_w = 0.0f; } //qn_y = [zeros(1, N, 1); q(1:end - 1, : , 2)]; //qn_z = [zeros(1, N, 1); q(1:end - 1, : , 3)]; float qn_y, qn_z; if ((iy - 1) >= 0) { qn_y = q[up].y; qn_z = q[up].z; } else { qn_y = 0.0f; qn_z = 0.0f; } //div_q(:, : , 1) = (qc(:, : , 1) - qw_x) + (qc(:, : , 3) - qn_z); //div_q(:, : , 2) = (qc(:, : , 4) - qw_w) + (qc(:, : , 2) - qn_y); float2 div_q; div_q.x = (qc.x - qw_x) + (qc.z - qn_z); div_q.y = (qc.w - qw_w) + (qc.y - qn_y); //dq_tensor(:, : , 1) = a.*p(:, : , 1) + c.*p(:, : , 2); //dq_tensor(:, : , 2) = c.*p(:, : , 1) + b.*p(:, : , 2); float2 dq_tensor; dq_tensor.x = a[pos] * p[pos].x + c[pos] * p[pos].y; dq_tensor.y = c[pos] * p[pos].x + b[pos] * p[pos].y; //v = v_ + tau. / eta_v.*(alpha1.*dq_tensor + alpha0.*div_q); v[pos].x = v_[pos].x + (tau / eta_v1[pos]) * (alpha1 * dq_tensor.x + alpha0 * div_q.x); v[pos].y = v_[pos].y + (tau / eta_v2[pos]) * (alpha1 * dq_tensor.y + alpha0 * div_q.y); // over - relaxation //u_ = u + mu.*(u - u_); //v_ = v + mu.*(v - v_); u_s[pos] = u[pos] + mu * (u[pos] - u_[pos]); v_s[pos].x = v[pos].x + mu * (v[pos].x - v_[pos].x); v_s[pos].y = v[pos].y + mu * (v[pos].y - v_[pos].y); } } void lup::Upsampling::UpdatePrimalVariablesL2(float2* Tp, float* u_, float2* v_, float2* p, float4* q, float* a, float* b, float* c, float tau, float* eta_u, float* eta_v1, float* eta_v2, float alpha0, float alpha1, float* w, float* dw, float mu, float* u, float2* v, float* u_s, float2* v_s) { dim3 threads(BlockWidth, BlockHeight); dim3 blocks(iDivUp(width, threads.x), iDivUp(height, threads.y)); UpdatePrimalVariablesL2Kernel << < blocks, threads >> > (Tp, u_, v_, p, q, a, b, c, tau, eta_u, eta_v1, eta_v2, alpha0, alpha1, w, dw, mu, u, v, u_s, v_s, width, height, stride); } //void lup::Upsampling::UpsamplingTensorTVGL2(int w, int h, int s, float* u_init, float* depth, float* weight, // float* gray, float beta, float gamma, float tgv_alpha, float lambda, int maxits) //{ // //}
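Every launch in the upsampling code above builds its grid as iDivUp(width, threads.x) by iDivUp(height, threads.y), with BlockWidth, BlockHeight and iDivUp declared elsewhere (presumably in upsampling.h, which is not included in this collection). The definition iDivUp is assumed to have is plain ceiling division; a typical form is sketched below, and this is an assumption rather than the library's actual code.

// Assumed helper (defined elsewhere, e.g. upsampling.h): number of blocks of
// size b needed to cover a elements, i.e. ceil(a / b) in integer arithmetic.
static inline int iDivUp(int a, int b) { return (a + b - 1) / b; }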
f2fa78ab5dd8a219125288a62d16edf60b55c640.hip
// !!! This is a file automatically generated by hipify!!! // adjointness testing #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { int N = 1; PCS sigma = 2; // upsampling factor int M = 100; PCS epsilon = 1e-12; int kerevalmeth = 0; int method = 0; // memory allocation //Host PCS *u; CPX *c; CPX *fw; PCS *zp; PCS *fwkerhalf; u = (PCS *)malloc(M * sizeof(PCS)); c = (CPX *)malloc(M * sizeof(CPX)); PCS *z = (PCS *)malloc(N * sizeof(PCS)); CPX *fk = (CPX *)malloc(N * sizeof(CPX)); //Device PCS *d_u, *d_z; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(hipMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(hipMalloc(&d_z, N * sizeof(PCS))); checkCudaErrors(hipMalloc(&d_c, M * sizeof(CUCPX))); checkCudaErrors(hipMalloc(&d_fk, N * sizeof(CUCPX))); // data generation for (int i = 0; i < M; i++) { u[i] = 2.0 + i; //xxxxx c[i].real(randm11()); c[i].imag(randm11()); // wgt[i] = 1; } for (int i = 0; i < N; i++) { z[i] = -randm11(); fk[i].real(randm11() * 1000); fk[i].imag(0); } // result allocation CPX *c_2 = (CPX *)malloc(M * sizeof(CPX)); memset(c_2, 0, sizeof(CPX)*M); CPX *fk_2 = (CPX *)malloc(N * sizeof(CPX)); memset(fk_2, 0, sizeof(CPX)*N); /*-------------------------------------- C -> Fk ---------------------------------*/ // setting plan curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); // memory transfering checkCudaErrors(hipMemcpy(d_z, z, sizeof(PCS)*N, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_u, u, sizeof(PCS)*M, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_c, c, sizeof(CUCPX)*M, hipMemcpyHostToDevice)); plan->d_x = d_z; int direction = 1; cunufft_setting(N, 1, 1, M, kerevalmeth, method, direction, epsilon, sigma, 3, 1, d_u, NULL, NULL, d_c, plan); int nf1 = plan->nf1; // correction factor fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); checkCudaErrors(hipMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS)*(N), hipMemcpyDeviceToHost)); // fw malloc and set checkCudaErrors(hipMalloc((void**)&d_fw,sizeof(CUCPX)*plan->nf1)); checkCudaErrors(hipMemset(d_fw, 0, sizeof(CUCPX)*plan->nf1)); plan->fw = d_fw; CUCPX *d_fk_2; checkCudaErrors(hipMalloc((void**)&d_fk_2,sizeof(CUCPX)*N)); checkCudaErrors(hipMemset(d_fk_2, 0, sizeof(CUCPX)*N)); plan->fk = d_fk_2; // conv curafft_conv(plan); fw = (CPX *)malloc(sizeof(CPX)*plan->nf1); hipMemcpy(fw,plan->fw,sizeof(CUCPX)*plan->nf1,hipMemcpyDeviceToHost); zp = (PCS *) malloc(sizeof(PCS)*N); checkCudaErrors(hipMemcpy(zp,plan->d_x,sizeof(PCS)*N,hipMemcpyDeviceToHost)); // dft for (int i = 0; i < N; i++) { for (int j = 0; j < plan->nf1; j++) { if(j<nf1/2)fk_2[i] += fw[j+nf1/2] * exp((PCS)j * zp[i] * IMA); else fk_2[i] += fw[j-nf1/2] * exp(((PCS)j-(PCS)nf1) * zp[i] * IMA); } } //deconv for(int i=0; i<N; i++){ fk_2[i] = fk_2[i] / fwkerhalf[i] * exp((z[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); } CPX *truth = (CPX *) malloc(sizeof(CPX)*N); printf("ground truth printing...\n"); for (int i = 0; i < N; i++) { truth[i] = 0; for (int j = 0; j < M; j++) { truth[i] += c[j] * exp(z[i] * u[j] * IMA); } } CPX diff; double err=0; double nrm=0; for(int i=0; i<N; i++){ diff = truth[i] - fk_2[i]; err += real(conj(diff)*diff); nrm += real(conj(fk_2[i])*fk_2[i]); } printf("l2 error %.6g\n",sqrt(err/nrm)); // 
free free(plan); /*-------------------------------------- Fk -> C ---------------------------------*/ plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); checkCudaErrors(hipMemcpy(d_z, z, sizeof(PCS) * N, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_u, u, sizeof(PCS)*M, hipMemcpyHostToDevice)); checkCudaErrors(hipMemset(d_c, 0, sizeof(CUCPX)*M)); plan->d_x = d_z; direction = 0; cunufft_setting(N, 1, 1, M, kerevalmeth, method, direction, epsilon, sigma, 3, 1, d_u, NULL, NULL, d_c, plan); nf1 = plan->nf1; memset(fw, 0, sizeof(CPX)*nf1); checkCudaErrors(hipMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS) * (N), hipMemcpyDeviceToHost)); // can remove this line CPX *fk_1 = (CPX*) malloc(sizeof(CPX)*N); // deconv for (int i = 0; i < N; i++) { fk_1[i] = fk[i]; fk_1[i] = fk_1[i] / fwkerhalf[i] * exp(-(z[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); // fk[i] = fk[i] / fwkerhalf[i] * exp(-k[i]*plan->ta.i_center[0]*IMA); } // checkCudaErrors(hipMemcpy(zp, plan->d_x, sizeof(PCS) * N, hipMemcpyDeviceToHost)); // idft for (int j = 0; j < plan->nf1; j++) { for (int i = 0; i < N; i++) { CPX temp = exp(-(j-nf1/2) * zp[i] * IMA); fw[j] += fk_1[i] * temp; } } // interp checkCudaErrors(hipMemcpy(d_fw, fw, sizeof(CUCPX) * plan->nf1, hipMemcpyHostToDevice)); plan->fw = d_fw; curafft_interp(plan); checkCudaErrors(hipMemcpy(c_2, plan->d_c, sizeof(CUCPX) * M, hipMemcpyDeviceToHost)); for (int i = 0; i < M; i++) { CPX temp = exp(-u[i] * plan->ta.o_center[0] * IMA); c_2[i] = c_2[i] * temp; // some issues } truth = (CPX *)malloc(sizeof(CPX) * M); printf("ground truth printing...\n"); for (int j = 0; j < M; j++) { truth[j] = 0; for (int i = 0; i < N; i++) { truth[j] += fk[i] * exp(-z[i] * u[j] * IMA); } } err = 0; nrm = 0; for (int i = 0; i < M; i++) { diff = truth[i] - c_2[i]; err += real(conj(diff) * diff); nrm += real(conj(c_2[i]) * c_2[i]); } printf("l2 error %.6g\n", sqrt(err / nrm)); PCS adjt_1 = 0; for(int i=0; i<M; i++){ adjt_1 += (conj(c_2[i])*c[i]).real(); } PCS adjt_2 = 0; for(int i=0; i<N; i++){ adjt_2 += (conj(fk_2[i])*fk[i]).real(); } printf("adjointness checking...\n %.10lf, %.10lf\n",adjt_1,adjt_2); //free memory // Device curafft_free(plan); //Host free(u); free(c); free(z); free(fk); free(c_2); free(fk_2); free(fwkerhalf); free(fw); free(zp); free(fk_1); return 0; }
f2fa78ab5dd8a219125288a62d16edf60b55c640.cu
// adjointness testing #include <iostream> #include <iomanip> #include <math.h> #include <helper_cuda.h> #include <thrust/complex.h> #include <algorithm> //#include <thrust> using namespace thrust; #include "ragridder_plan.h" #include "conv_interp_invoker.h" #include "cuft.h" #include "deconv.h" #include "cugridder.h" #include "precomp.h" #include "utils.h" int main(int argc, char *argv[]) { int N = 1; PCS sigma = 2; // upsampling factor int M = 100; PCS epsilon = 1e-12; int kerevalmeth = 0; int method = 0; // memory allocation //Host PCS *u; CPX *c; CPX *fw; PCS *zp; PCS *fwkerhalf; u = (PCS *)malloc(M * sizeof(PCS)); c = (CPX *)malloc(M * sizeof(CPX)); PCS *z = (PCS *)malloc(N * sizeof(PCS)); CPX *fk = (CPX *)malloc(N * sizeof(CPX)); //Device PCS *d_u, *d_z; CUCPX *d_c, *d_fk; CUCPX *d_fw; checkCudaErrors(cudaMalloc(&d_u, M * sizeof(PCS))); checkCudaErrors(cudaMalloc(&d_z, N * sizeof(PCS))); checkCudaErrors(cudaMalloc(&d_c, M * sizeof(CUCPX))); checkCudaErrors(cudaMalloc(&d_fk, N * sizeof(CUCPX))); // data generation for (int i = 0; i < M; i++) { u[i] = 2.0 + i; //xxxxx c[i].real(randm11()); c[i].imag(randm11()); // wgt[i] = 1; } for (int i = 0; i < N; i++) { z[i] = -randm11(); fk[i].real(randm11() * 1000); fk[i].imag(0); } // result allocation CPX *c_2 = (CPX *)malloc(M * sizeof(CPX)); memset(c_2, 0, sizeof(CPX)*M); CPX *fk_2 = (CPX *)malloc(N * sizeof(CPX)); memset(fk_2, 0, sizeof(CPX)*N); /*-------------------------------------- C -> Fk ---------------------------------*/ // setting plan curafft_plan *plan; plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); // memory transfering checkCudaErrors(cudaMemcpy(d_z, z, sizeof(PCS)*N, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_u, u, sizeof(PCS)*M, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_c, c, sizeof(CUCPX)*M, cudaMemcpyHostToDevice)); plan->d_x = d_z; int direction = 1; cunufft_setting(N, 1, 1, M, kerevalmeth, method, direction, epsilon, sigma, 3, 1, d_u, NULL, NULL, d_c, plan); int nf1 = plan->nf1; // correction factor fwkerhalf = (PCS *)malloc(sizeof(PCS)*(N)); checkCudaErrors(cudaMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS)*(N), cudaMemcpyDeviceToHost)); // fw malloc and set checkCudaErrors(cudaMalloc((void**)&d_fw,sizeof(CUCPX)*plan->nf1)); checkCudaErrors(cudaMemset(d_fw, 0, sizeof(CUCPX)*plan->nf1)); plan->fw = d_fw; CUCPX *d_fk_2; checkCudaErrors(cudaMalloc((void**)&d_fk_2,sizeof(CUCPX)*N)); checkCudaErrors(cudaMemset(d_fk_2, 0, sizeof(CUCPX)*N)); plan->fk = d_fk_2; // conv curafft_conv(plan); fw = (CPX *)malloc(sizeof(CPX)*plan->nf1); cudaMemcpy(fw,plan->fw,sizeof(CUCPX)*plan->nf1,cudaMemcpyDeviceToHost); zp = (PCS *) malloc(sizeof(PCS)*N); checkCudaErrors(cudaMemcpy(zp,plan->d_x,sizeof(PCS)*N,cudaMemcpyDeviceToHost)); // dft for (int i = 0; i < N; i++) { for (int j = 0; j < plan->nf1; j++) { if(j<nf1/2)fk_2[i] += fw[j+nf1/2] * exp((PCS)j * zp[i] * IMA); else fk_2[i] += fw[j-nf1/2] * exp(((PCS)j-(PCS)nf1) * zp[i] * IMA); } } //deconv for(int i=0; i<N; i++){ fk_2[i] = fk_2[i] / fwkerhalf[i] * exp((z[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); } CPX *truth = (CPX *) malloc(sizeof(CPX)*N); printf("ground truth printing...\n"); for (int i = 0; i < N; i++) { truth[i] = 0; for (int j = 0; j < M; j++) { truth[i] += c[j] * exp(z[i] * u[j] * IMA); } } CPX diff; double err=0; double nrm=0; for(int i=0; i<N; i++){ diff = truth[i] - fk_2[i]; err += real(conj(diff)*diff); nrm += real(conj(fk_2[i])*fk_2[i]); } printf("l2 error %.6g\n",sqrt(err/nrm)); // free free(plan); 
/*-------------------------------------- Fk -> C ---------------------------------*/ plan = new curafft_plan(); memset(plan, 0, sizeof(curafft_plan)); checkCudaErrors(cudaMemcpy(d_z, z, sizeof(PCS) * N, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_u, u, sizeof(PCS)*M, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemset(d_c, 0, sizeof(CUCPX)*M)); plan->d_x = d_z; direction = 0; cunufft_setting(N, 1, 1, M, kerevalmeth, method, direction, epsilon, sigma, 3, 1, d_u, NULL, NULL, d_c, plan); nf1 = plan->nf1; memset(fw, 0, sizeof(CPX)*nf1); checkCudaErrors(cudaMemcpy(fwkerhalf, plan->fwkerhalf1, sizeof(PCS) * (N), cudaMemcpyDeviceToHost)); // can remove this line CPX *fk_1 = (CPX*) malloc(sizeof(CPX)*N); // deconv for (int i = 0; i < N; i++) { fk_1[i] = fk[i]; fk_1[i] = fk_1[i] / fwkerhalf[i] * exp(-(z[i]-plan->ta.o_center[0])*plan->ta.i_center[0]*IMA); // fk[i] = fk[i] / fwkerhalf[i] * exp(-k[i]*plan->ta.i_center[0]*IMA); } // checkCudaErrors(cudaMemcpy(zp, plan->d_x, sizeof(PCS) * N, cudaMemcpyDeviceToHost)); // idft for (int j = 0; j < plan->nf1; j++) { for (int i = 0; i < N; i++) { CPX temp = exp(-(j-nf1/2) * zp[i] * IMA); fw[j] += fk_1[i] * temp; } } // interp checkCudaErrors(cudaMemcpy(d_fw, fw, sizeof(CUCPX) * plan->nf1, cudaMemcpyHostToDevice)); plan->fw = d_fw; curafft_interp(plan); checkCudaErrors(cudaMemcpy(c_2, plan->d_c, sizeof(CUCPX) * M, cudaMemcpyDeviceToHost)); for (int i = 0; i < M; i++) { CPX temp = exp(-u[i] * plan->ta.o_center[0] * IMA); c_2[i] = c_2[i] * temp; // some issues } truth = (CPX *)malloc(sizeof(CPX) * M); printf("ground truth printing...\n"); for (int j = 0; j < M; j++) { truth[j] = 0; for (int i = 0; i < N; i++) { truth[j] += fk[i] * exp(-z[i] * u[j] * IMA); } } err = 0; nrm = 0; for (int i = 0; i < M; i++) { diff = truth[i] - c_2[i]; err += real(conj(diff) * diff); nrm += real(conj(c_2[i]) * c_2[i]); } printf("l2 error %.6g\n", sqrt(err / nrm)); PCS adjt_1 = 0; for(int i=0; i<M; i++){ adjt_1 += (conj(c_2[i])*c[i]).real(); } PCS adjt_2 = 0; for(int i=0; i<N; i++){ adjt_2 += (conj(fk_2[i])*fk[i]).real(); } printf("adjointness checking...\n %.10lf, %.10lf\n",adjt_1,adjt_2); //free memory // Device curafft_free(plan); //Host free(u); free(c); free(z); free(fk); free(c_2); free(fk_2); free(fwkerhalf); free(fw); free(zp); free(fk_1); return 0; }
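The test above checks adjointness by comparing the two real inner products adjt_1 and adjt_2, i.e. Re<A c, fk> against Re<c, A^H fk> for the C -> Fk transform A and its Fk -> C adjoint. Below is a minimal CPU-only sketch of the same identity for the plain non-uniform DFT pair that the test uses as ground truth; it does not call the cunufft/cugridder API, randm11 is re-implemented locally with rand(), and the sizes are arbitrary stand-ins.

#include <complex>
#include <cstdio>
#include <cstdlib>

int main() {
    const int M = 100, N = 16;
    using CPX = std::complex<double>;
    const CPX I(0.0, 1.0);
    double u[M], z[N];
    CPX c[M], fk[N], fk2[N], c2[M];
    srand(42);
    auto randm11 = []() { return 2.0 * rand() / RAND_MAX - 1.0; };
    for (int j = 0; j < M; ++j) { u[j] = 2.0 + j; c[j] = CPX(randm11(), randm11()); }
    for (int i = 0; i < N; ++i) { z[i] = -randm11(); fk[i] = CPX(1000.0 * randm11(), 0.0); }
    // forward map: fk2[i] = sum_j c[j] * exp(+I * z[i] * u[j])
    for (int i = 0; i < N; ++i) { fk2[i] = 0; for (int j = 0; j < M; ++j) fk2[i] += c[j] * std::exp(I * (z[i] * u[j])); }
    // adjoint map: c2[j] = sum_i fk[i] * exp(-I * z[i] * u[j])
    for (int j = 0; j < M; ++j) { c2[j] = 0; for (int i = 0; i < N; ++i) c2[j] += fk[i] * std::exp(-I * (z[i] * u[j])); }
    // adjointness: Re<F c, fk> should equal Re<c, F^H fk>
    CPX lhs = 0, rhs = 0;
    for (int i = 0; i < N; ++i) lhs += std::conj(fk2[i]) * fk[i];
    for (int j = 0; j < M; ++j) rhs += std::conj(c[j]) * c2[j];
    printf("adjointness checking... %.10lf, %.10lf\n", lhs.real(), rhs.real());
    return 0;
}

The two printed numbers agree to rounding error for any choice of u, z, c, fk, which is exactly the property the gridded/interpolated pipeline above is being tested against.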
1f5b4b95e3959eb97259b530dee203a79a50695f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void myKernel() { printf("Hello World!\n"); } int main() { hipLaunchKernelGGL(myKernel, dim3(1), dim3(1), 0, 0); return 0; }

1f5b4b95e3959eb97259b530dee203a79a50695f.cu
#include <stdio.h> __global__ void myKernel() { printf("Hello World!\n"); } int main() { myKernel<<<1,1>>>(); return 0; }
275280771f672e3650067246b27a6163eece2d70.hip
// !!! This is a file automatically generated by hipify!!! /*Copyright (c) 2014, Edgar Solomonik, all rights reserved.*/ #include <complex> #include <assert.h> #include <stdio.h> #include "int_timer.h" #include <stdint.h> #include "offload.h" #include "../tensor/algstrct.h" #include "../interface/timer.h" #ifdef USE_ROCM #include <hip/hip_runtime.h> #include <rocblas.h> #include "device_launch_parameters.h" #endif namespace CTF_int{ volatile static int64_t int64_t_max = INT64_MAX; #ifndef PROFILE #define TAU_PROFILE(NAME,ARG,USER) #define TAU_PROFILE_TIMER(ARG1, ARG2, ARG3, ARG4) #define TAU_PROFILER_CREATE(ARG1, ARG2, ARG3, ARG4) #define TAU_PROFILE_STOP(ARG) #define TAU_PROFILE_START(ARG) #define TAU_PROFILE_SET_NODE(ARG) #define TAU_PROFILE_SET_CONTEXT(ARG) #define TAU_FSTART(ARG) #define TAU_FSTOP(ARG) #endif #define ABORT \ do{ \ assert(0); } while (0) #ifdef USE_ROCM int initialized = 0; hipblasHandle_t cuhandle; void offload_init(){ if (!initialized){ int ndev=0; hipError_t err = hipGetDeviceCount(&ndev); assert(err == hipSuccess); assert(ndev > 0); hipblasStatus_t status = hipblasCreate(&cuhandle); assert(status == HIPBLAS_STATUS_SUCCESS); } initialized = 1; } void offload_exit(){ if (initialized){ hipblasStatus_t status = hipblasDestroy(cuhandle); assert(status == HIPBLAS_STATUS_SUCCESS); initialized = 0; } } offload_tsr::offload_tsr(algstrct const * sr_, int64_t size_) : offload_arr(size_*sr_->el_size) { sr = sr_; size = size_; } /*offload_tsr::~offload_tsr(){ }*/ LinModel<2> upload_mdl(upload_mdl_init,"upload_mdl"); LinModel<2> download_mdl(download_mdl_init,"download_mdl"); double estimate_download_time(int64_t size){ double ps[] = {1.0, (double)size}; return download_mdl.est_time(ps); } double estimate_upload_time(int64_t size){ double ps[] = {1.0, (double)size}; return upload_mdl.est_time(ps); } template <typename dtype> __global__ void gset_zero(dtype *arr, int64_t size, dtype val) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=idx; i<size; i+= gridDim.x*blockDim.x) { arr[i]=val; } } void offload_tsr::set_zero(){ int blockSize = 256; int numBlocks = (size + blockSize - 1) / (size); TAU_FSTART(set_zero); switch (sr->el_size){ case 4: hipLaunchKernelGGL(( gset_zero), dim3(blockSize), dim3(numBlocks), 0, 0, (float*)dev_spr, size, ((float*)sr->addid())[0]); break; case 8: hipLaunchKernelGGL(( gset_zero), dim3(blockSize), dim3(numBlocks), 0, 0, (double*)dev_spr, size, ((double*)sr->addid())[0]); break; case 16: hipLaunchKernelGGL(( gset_zero), dim3(blockSize), dim3(numBlocks), 0, 0, (std::complex<double>*)dev_spr, size, ((std::complex<double>*)sr->addid())[0]); break; default: assert(0); break; } TAU_FSTOP(set_zero); } offload_arr::offload_arr(int64_t nbytes_){ nbytes = nbytes_; TAU_FSTART(offload_malloc); hipError_t err = hipMalloc((void**)&dev_spr, nbytes); TAU_FSTOP(offload_malloc); assert(err == hipSuccess); } offload_arr::~offload_arr(){ TAU_FSTART(offload_free); hipError_t err = hipFree(dev_spr); TAU_FSTOP(offload_free); assert(err == hipSuccess); } void offload_arr::download(char * host_spr){ assert(initialized); TAU_FSTART(cuda_download); double st_time = MPI_Wtime(); hipError_t err = hipMemcpy(host_spr, dev_spr, nbytes, hipMemcpyDeviceToHost); double exe_time = MPI_Wtime()-st_time; double tps[] = {exe_time, 1.0, (double)nbytes}; download_mdl.observe(tps); TAU_FSTOP(cuda_download); assert(err == hipSuccess); } void offload_arr::upload(char const * host_spr){ TAU_FSTART(cuda_upload); double st_time = MPI_Wtime(); hipError_t err = hipMemcpy(dev_spr, host_spr, 
nbytes, hipMemcpyHostToDevice); double exe_time = MPI_Wtime()-st_time; double tps[] = {exe_time, 1.0, (double)nbytes}; upload_mdl.observe(tps); TAU_FSTOP(cuda_upload); assert(err == hipSuccess); } void host_pinned_alloc(void ** ptr, int64_t size){ TAU_FSTART(host_pinned_malloc); hipError_t err = hipHostMalloc(ptr, size, hipHostMallocMapped); TAU_FSTOP(host_pinned_malloc); assert(err == hipSuccess); } void host_pinned_free(void * ptr){ TAU_FSTART(host_pinned_free); hipError_t err = hipHostFree(ptr); TAU_FSTOP(host_pinned_free); assert(err == hipSuccess); } #endif template void offload_gemm(char tA, char tB, int m, int n, int k, double alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, double beta, offload_tsr & C, int lda_C); template void offload_gemm(char tA, char tB, int m, int n, int k, std::complex<double> alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, std::complex<double> beta, offload_tsr & C, int lda_C); template <> void offload_gemm<double>(char tA, char tB, int m, int n, int k, double alpha, double const * dev_A, int lda_A, double const * dev_B, int lda_B, double beta, double * dev_C, int lda_C){ #ifdef USE_ROCM assert(initialized); hipblasOperation_t cuA; switch (tA){ case 'n': case 'N': cuA = HIPBLAS_OP_N; break; case 't': case 'T': cuA = HIPBLAS_OP_T; break; } hipblasOperation_t cuB; switch (tB){ case 'n': case 'N': cuB = HIPBLAS_OP_N; break; case 't': case 'T': cuB = HIPBLAS_OP_T; break; } hipblasStatus_t status = hipblasDgemm(cuhandle, cuA, cuB, m, n, k, &alpha, dev_A, lda_A, dev_B, lda_B, &beta, dev_C, lda_C); #ifdef PROFILE hipDeviceSynchronize(); #endif assert(status == HIPBLAS_STATUS_SUCCESS); #endif } template <> void offload_gemm< std::complex<double> >( char tA, char tB, int m, int n, int k, std::complex<double> alpha, std::complex<double> const * dev_A, int lda_A, std::complex<double> const * dev_B, int lda_B, std::complex<double> beta, std::complex<double> * dev_C, int lda_C){ #ifdef USE_ROCM assert(initialized); hipblasOperation_t cuA; switch (tA){ case 'n': case 'N': cuA = HIPBLAS_OP_N; break; case 't': case 'T': cuA = HIPBLAS_OP_T; break; case 'c': case 'C': cuA = HIPBLAS_OP_C; break; } hipblasOperation_t cuB; switch (tB){ case 'n': case 'N': cuB = HIPBLAS_OP_N; break; case 't': case 'T': cuB = HIPBLAS_OP_T; break; case 'c': case 'C': cuB = HIPBLAS_OP_C; break; } TAU_FSTART(cublas_zgemm); hipblasStatus_t status = hipblasZgemm(cuhandle, cuA, cuB, m, n, k, reinterpret_cast<hipDoubleComplex*>(&alpha), reinterpret_cast<const hipDoubleComplex*>(dev_A), lda_A, reinterpret_cast<const hipDoubleComplex*>(dev_B), lda_B, reinterpret_cast<hipDoubleComplex*>(&beta), reinterpret_cast<hipDoubleComplex*>(dev_C), lda_C); #ifdef PROFILE hipDeviceSynchronize(); #endif TAU_FSTOP(cublas_zgemm); assert(status == HIPBLAS_STATUS_SUCCESS); assert(status == HIPBLAS_STATUS_SUCCESS); #endif } template <typename dtype> void offload_gemm(char tA, char tB, int m, int n, int k, dtype alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, dtype beta, offload_tsr & C, int lda_C){ TAU_FSTART(cuda_gemm); offload_gemm(tA, tB, m, n, k, alpha, (dtype*)A.dev_spr, lda_A, (dtype*)B.dev_spr, lda_B, beta, (dtype*)C.dev_spr, lda_C); TAU_FSTOP(cuda_gemm); } }
275280771f672e3650067246b27a6163eece2d70.cu
/*Copyright (c) 2014, Edgar Solomonik, all rights reserved.*/ #include <complex> #include <assert.h> #include <stdio.h> #include "int_timer.h" #include <stdint.h> #include "offload.h" #include "../tensor/algstrct.h" #include "../interface/timer.h" #ifdef USE_CUDA #include <cuda_runtime.h> #include <cublas_v2.h> #include "device_launch_parameters.h" #endif namespace CTF_int{ volatile static int64_t int64_t_max = INT64_MAX; #ifndef PROFILE #define TAU_PROFILE(NAME,ARG,USER) #define TAU_PROFILE_TIMER(ARG1, ARG2, ARG3, ARG4) #define TAU_PROFILER_CREATE(ARG1, ARG2, ARG3, ARG4) #define TAU_PROFILE_STOP(ARG) #define TAU_PROFILE_START(ARG) #define TAU_PROFILE_SET_NODE(ARG) #define TAU_PROFILE_SET_CONTEXT(ARG) #define TAU_FSTART(ARG) #define TAU_FSTOP(ARG) #endif #define ABORT \ do{ \ assert(0); } while (0) #ifdef USE_CUDA int initialized = 0; cublasHandle_t cuhandle; void offload_init(){ if (!initialized){ int ndev=0; cudaError_t err = cudaGetDeviceCount(&ndev); assert(err == cudaSuccess); assert(ndev > 0); cublasStatus_t status = cublasCreate(&cuhandle); assert(status == CUBLAS_STATUS_SUCCESS); } initialized = 1; } void offload_exit(){ if (initialized){ cublasStatus_t status = cublasDestroy(cuhandle); assert(status == CUBLAS_STATUS_SUCCESS); initialized = 0; } } offload_tsr::offload_tsr(algstrct const * sr_, int64_t size_) : offload_arr(size_*sr_->el_size) { sr = sr_; size = size_; } /*offload_tsr::~offload_tsr(){ }*/ LinModel<2> upload_mdl(upload_mdl_init,"upload_mdl"); LinModel<2> download_mdl(download_mdl_init,"download_mdl"); double estimate_download_time(int64_t size){ double ps[] = {1.0, (double)size}; return download_mdl.est_time(ps); } double estimate_upload_time(int64_t size){ double ps[] = {1.0, (double)size}; return upload_mdl.est_time(ps); } template <typename dtype> __global__ void gset_zero(dtype *arr, int64_t size, dtype val) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=idx; i<size; i+= gridDim.x*blockDim.x) { arr[i]=val; } } void offload_tsr::set_zero(){ int blockSize = 256; int numBlocks = (size + blockSize - 1) / (size); TAU_FSTART(set_zero); switch (sr->el_size){ case 4: gset_zero<<<blockSize, numBlocks>>>((float*)dev_spr, size, ((float*)sr->addid())[0]); break; case 8: gset_zero<<<blockSize, numBlocks>>>((double*)dev_spr, size, ((double*)sr->addid())[0]); break; case 16: gset_zero<<<blockSize, numBlocks>>>((std::complex<double>*)dev_spr, size, ((std::complex<double>*)sr->addid())[0]); break; default: assert(0); break; } TAU_FSTOP(set_zero); } offload_arr::offload_arr(int64_t nbytes_){ nbytes = nbytes_; TAU_FSTART(offload_malloc); cudaError_t err = cudaMalloc((void**)&dev_spr, nbytes); TAU_FSTOP(offload_malloc); assert(err == cudaSuccess); } offload_arr::~offload_arr(){ TAU_FSTART(offload_free); cudaError_t err = cudaFree(dev_spr); TAU_FSTOP(offload_free); assert(err == cudaSuccess); } void offload_arr::download(char * host_spr){ assert(initialized); TAU_FSTART(cuda_download); double st_time = MPI_Wtime(); cudaError_t err = cudaMemcpy(host_spr, dev_spr, nbytes, cudaMemcpyDeviceToHost); double exe_time = MPI_Wtime()-st_time; double tps[] = {exe_time, 1.0, (double)nbytes}; download_mdl.observe(tps); TAU_FSTOP(cuda_download); assert(err == cudaSuccess); } void offload_arr::upload(char const * host_spr){ TAU_FSTART(cuda_upload); double st_time = MPI_Wtime(); cudaError_t err = cudaMemcpy(dev_spr, host_spr, nbytes, cudaMemcpyHostToDevice); double exe_time = MPI_Wtime()-st_time; double tps[] = {exe_time, 1.0, (double)nbytes}; upload_mdl.observe(tps); 
TAU_FSTOP(cuda_upload); assert(err == cudaSuccess); } void host_pinned_alloc(void ** ptr, int64_t size){ TAU_FSTART(host_pinned_malloc); cudaError_t err = cudaHostAlloc(ptr, size, cudaHostAllocMapped); TAU_FSTOP(host_pinned_malloc); assert(err == cudaSuccess); } void host_pinned_free(void * ptr){ TAU_FSTART(host_pinned_free); cudaError_t err = cudaFreeHost(ptr); TAU_FSTOP(host_pinned_free); assert(err == cudaSuccess); } #endif template void offload_gemm(char tA, char tB, int m, int n, int k, double alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, double beta, offload_tsr & C, int lda_C); template void offload_gemm(char tA, char tB, int m, int n, int k, std::complex<double> alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, std::complex<double> beta, offload_tsr & C, int lda_C); template <> void offload_gemm<double>(char tA, char tB, int m, int n, int k, double alpha, double const * dev_A, int lda_A, double const * dev_B, int lda_B, double beta, double * dev_C, int lda_C){ #ifdef USE_CUDA assert(initialized); cublasOperation_t cuA; switch (tA){ case 'n': case 'N': cuA = CUBLAS_OP_N; break; case 't': case 'T': cuA = CUBLAS_OP_T; break; } cublasOperation_t cuB; switch (tB){ case 'n': case 'N': cuB = CUBLAS_OP_N; break; case 't': case 'T': cuB = CUBLAS_OP_T; break; } cublasStatus_t status = cublasDgemm(cuhandle, cuA, cuB, m, n, k, &alpha, dev_A, lda_A, dev_B, lda_B, &beta, dev_C, lda_C); #ifdef PROFILE cudaDeviceSynchronize(); #endif assert(status == CUBLAS_STATUS_SUCCESS); #endif } template <> void offload_gemm< std::complex<double> >( char tA, char tB, int m, int n, int k, std::complex<double> alpha, std::complex<double> const * dev_A, int lda_A, std::complex<double> const * dev_B, int lda_B, std::complex<double> beta, std::complex<double> * dev_C, int lda_C){ #ifdef USE_CUDA assert(initialized); cublasOperation_t cuA; switch (tA){ case 'n': case 'N': cuA = CUBLAS_OP_N; break; case 't': case 'T': cuA = CUBLAS_OP_T; break; case 'c': case 'C': cuA = CUBLAS_OP_C; break; } cublasOperation_t cuB; switch (tB){ case 'n': case 'N': cuB = CUBLAS_OP_N; break; case 't': case 'T': cuB = CUBLAS_OP_T; break; case 'c': case 'C': cuB = CUBLAS_OP_C; break; } TAU_FSTART(cublas_zgemm); cublasStatus_t status = cublasZgemm(cuhandle, cuA, cuB, m, n, k, reinterpret_cast<cuDoubleComplex*>(&alpha), reinterpret_cast<const cuDoubleComplex*>(dev_A), lda_A, reinterpret_cast<const cuDoubleComplex*>(dev_B), lda_B, reinterpret_cast<cuDoubleComplex*>(&beta), reinterpret_cast<cuDoubleComplex*>(dev_C), lda_C); #ifdef PROFILE cudaDeviceSynchronize(); #endif TAU_FSTOP(cublas_zgemm); assert(status == CUBLAS_STATUS_SUCCESS); assert(status == CUBLAS_STATUS_SUCCESS); #endif } template <typename dtype> void offload_gemm(char tA, char tB, int m, int n, int k, dtype alpha, offload_tsr & A, int lda_A, offload_tsr & B, int lda_B, dtype beta, offload_tsr & C, int lda_C){ TAU_FSTART(cuda_gemm); offload_gemm(tA, tB, m, n, k, alpha, (dtype*)A.dev_spr, lda_A, (dtype*)B.dev_spr, lda_B, beta, (dtype*)C.dev_spr, lda_C); TAU_FSTOP(cuda_gemm); } }
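offload_arr and offload_gemm above wrap device allocation, host/device transfers with timing models, and a cublasDgemm call. Below is a stripped-down sketch of that upload / gemm / download round trip for a 2x2 column-major product; error handling follows the same assert-on-status style as the file, and none of the CTF_int plumbing (LinModel, TAU timers, pinned buffers) is reproduced.

#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdio>

int main() {
    const int n = 2;
    const double hA[4] = {1, 2, 3, 4};   // column-major: A = [1 3; 2 4]
    const double hB[4] = {5, 6, 7, 8};   // column-major: B = [5 7; 6 8]
    double hC[4] = {0};

    double *dA, *dB, *dC;
    cudaError_t err;
    err = cudaMalloc(&dA, sizeof(hA)); assert(err == cudaSuccess);
    err = cudaMalloc(&dB, sizeof(hB)); assert(err == cudaSuccess);
    err = cudaMalloc(&dC, sizeof(hC)); assert(err == cudaSuccess);
    err = cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice); assert(err == cudaSuccess);
    err = cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice); assert(err == cudaSuccess);

    cublasHandle_t handle;
    cublasStatus_t status = cublasCreate(&handle);
    assert(status == CUBLAS_STATUS_SUCCESS);

    const double alpha = 1.0, beta = 0.0;
    // C = alpha * A * B + beta * C, all operands resident on the device
    status = cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                         &alpha, dA, n, dB, n, &beta, dC, n);
    assert(status == CUBLAS_STATUS_SUCCESS);

    err = cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost); assert(err == cudaSuccess);
    printf("C = [%g %g; %g %g]\n", hC[0], hC[2], hC[1], hC[3]);

    cublasDestroy(handle);
    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}

cuBLAS assumes column-major storage, which is why the offload_gemm wrappers above pass leading dimensions straight through instead of transposing on the host.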
42728c80bbb0f2971eceea329e6dfba9889cfd55.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #ifndef COMMON_H #include "../include/common.h" #endif /* __global__ void sum(int *a, int* b , int* c, size_t pitch, int xx, int yy){ // __shared__ int Pp[xx][yy]; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // int *arow = (int *) ((char*)a + x * pitch); // int *brow = (int *) ((char*)b + x * pitch); // int *crow = (int *) ((char*)c + x * pitch); // // crow[y] = brow[y] + arow[y]; // crow[y] = Pp[x][y]; } struct Matrix* MatrixSum(struct Matrix* first, struct Matrix* second){ int n = (first->x * first->y )/ (512 * 512) + 512; struct Matrix* tmp_d = MatrixAllocateOnDevice(first->x, first->y); dim3 block(1); dim3 block_s(5,5); sum <<<block, block_s>>> ( first->matrix, second->matrix, tmp_d->matrix, first->pitch, first->x, first->y); struct Matrix* tmp_h = GetMatrixFromDevice(tmp_d); return tmp_h; } struct Matrix* MatrixSub(struct Matrix* first, struct Matrix* second); struct Matrix* MatrixMul(struct Matrix* first, struct Matrix* second); struct Matrix* MatrixDiv(struct Matrix* first, struct Matrix* second); */
42728c80bbb0f2971eceea329e6dfba9889cfd55.cu
#include <stdio.h> #ifndef COMMON_H #include "../include/common.h" #endif /* __global__ void sum(int *a, int* b , int* c, size_t pitch, int xx, int yy){ // __shared__ int Pp[xx][yy]; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // int *arow = (int *) ((char*)a + x * pitch); // int *brow = (int *) ((char*)b + x * pitch); // int *crow = (int *) ((char*)c + x * pitch); // // crow[y] = brow[y] + arow[y]; // crow[y] = Pp[x][y]; } struct Matrix* MatrixSum(struct Matrix* first, struct Matrix* second){ int n = (first->x * first->y )/ (512 * 512) + 512; struct Matrix* tmp_d = MatrixAllocateOnDevice(first->x, first->y); dim3 block(1); dim3 block_s(5,5); sum <<<block, block_s>>> ( first->matrix, second->matrix, tmp_d->matrix, first->pitch, first->x, first->y); struct Matrix* tmp_h = GetMatrixFromDevice(tmp_d); return tmp_h; } struct Matrix* MatrixSub(struct Matrix* first, struct Matrix* second); struct Matrix* MatrixMul(struct Matrix* first, struct Matrix* second); struct Matrix* MatrixDiv(struct Matrix* first, struct Matrix* second); */
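The commented-out kernel above indexes rows as (int *)((char *)a + x * pitch), which is the access pattern for pitched 2D allocations. Below is a minimal sketch of that pattern with cudaMallocPitch and cudaMemcpy2D, assuming plain int matrices of a hypothetical 8x8 size; the kernel name addPitched is illustrative and the Matrix struct and helpers used in this repository are not involved.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void addPitched(const int *a, const int *b, int *c, size_t pitch, int nx, int ny) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;   // row
    int y = blockIdx.y * blockDim.y + threadIdx.y;   // column
    if (x < nx && y < ny) {
        const int *arow = (const int *)((const char *)a + x * pitch);
        const int *brow = (const int *)((const char *)b + x * pitch);
        int *crow = (int *)((char *)c + x * pitch);
        crow[y] = arow[y] + brow[y];
    }
}

int main() {
    const int nx = 8, ny = 8;
    int h[nx][ny], out[nx][ny];
    for (int i = 0; i < nx; ++i) for (int j = 0; j < ny; ++j) h[i][j] = i * ny + j;

    size_t pitch;
    int *da, *db, *dc;
    cudaMallocPitch((void **)&da, &pitch, ny * sizeof(int), nx);
    cudaMallocPitch((void **)&db, &pitch, ny * sizeof(int), nx);
    cudaMallocPitch((void **)&dc, &pitch, ny * sizeof(int), nx);
    cudaMemcpy2D(da, pitch, h, ny * sizeof(int), ny * sizeof(int), nx, cudaMemcpyHostToDevice);
    cudaMemcpy2D(db, pitch, h, ny * sizeof(int), ny * sizeof(int), nx, cudaMemcpyHostToDevice);

    dim3 block(8, 8), grid(1, 1);
    addPitched<<<grid, block>>>(da, db, dc, pitch, nx, ny);
    cudaMemcpy2D(out, ny * sizeof(int), dc, pitch, ny * sizeof(int), nx, cudaMemcpyDeviceToHost);
    printf("out[3][4] = %d (expect %d)\n", out[3][4], 2 * h[3][4]);

    cudaFree(da); cudaFree(db); cudaFree(dc);
    return 0;
}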
eddd911772a08d4c5ebe2704127c8a08b04036b0.hip
// !!! This is a file automatically generated by hipify!!! char *title = "odd-even sort"; char *description = "Sorting by odd-even transpositions (odd-even sort)"; /* On each iteration of the algorithm the compare-exchange operations for all pairs of elements are independent and run simultaneously. Consider the case where the number of processors equals the number of elements, i.e. p=n is the number of processors (sorted elements). Assume the computing system has a ring topology. Let the elements ai (i = 1, .. , n) initially reside on processors pi (i = 1, ... , n). In an odd iteration every odd-numbered processor performs a compare-exchange of its element with the element held by its right neighbour; likewise, in an even iteration every even-numbered processor does the same with its right neighbour. On each iteration the odd and even processors perform a compare-exchange step with their right neighbours in Q(1) time, and n such iterations are needed, so the parallel sort runs in Q(n). When the number of processors p is smaller than n, each process receives a block of n/p items and sorts it in Q((n/p)log(n/p)). The processors then run p compare-split iterations (p/2 even and p/2 odd): adjacent processors exchange their data and sort the merged result locally (each pair ends up with identical arrays); the doubled array is then split into 2 parts, the left processor keeping only the left half (smaller values) and the right processor only the right half (larger values). After p iterations the array is sorted. */ #include <iostream> #include <cstdio> #include <cstdlib> #include <time.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #define assert( bool ) template<class T> __device__ void device_exchange(T *x, T *y, int count); template<class T>__device__ void device_copy(T *x, T *y, int count); template<class T> __device__ int device_comparer(T *x, T *y); template<class T> __device__ int device_indexator(T *x, int index, int len); template<class T> __device__ void device_bubble_sort(T *data, int index, int len, int n, int direction); template<class T> __global__ void global_oddeven_spliter(int * index, int n, int block_pairs); template<class T> __global__ void global_oddeven_preworker(T * data, int * index, int block_pairs, int direction); template<class T> __global__ void global_oddeven_worker(T * data0, T * data1, int * index, int block_pairs, int parity, int direction); template<class T> __host__ void host_oddeven_sort(int gridSize, int blockSize, T *data, int n, int direction); //////////////////////////////////////////////////////////////////////////////////////////// // Configuration attributes // _comparer - function that compares two array elements // _indexator - function that determines the bucket number for an array element // _non_parallel_sort - sorting routine that does not use parallel computation // _parallel_sort - sorting routine that uses parallel computation #define fn_comparer device_comparer<long> #define fn_indexator device_indexator<long> #define fn_non_parallel_sort device_bubble_sort<long> #define fn_parallel_sort host_oddeven_sort<long> template<class T> __host__ void host_oddeven_sort(int gridSize, int blockSize, T *data, int n, int direction) { // data - data array // n - number of elements in the input array // direction - sort order, // -1 means descending, // 1 means ascending hipError_t err; T *device_data[2]; int * device_index; // Choose an optimal split into sub-arrays int block_length = max(1,(int)pow((double)n,0.33333333333)); // The algorithm works on an array made of pairs of blocks, // so the number of blocks must be a multiple of 2, // because on each sorting step all blocks are grouped into pairs int block_pairs = (int)((n+(2*block_length)-1)/(2*block_length)); // Choose an optimal split into blocks and threads, // one thread sorts an array of length 2*block_length int blocks = (gridSize > 0)? gridSize : min(max(1,(int)sqrt((double)block_pairs)),255); int threads = (blockSize > 0)?
blockSize : (int)((block_pairs+blocks-1)/blocks); assert(n <= 2*block_length*block_pairs); assert(block_pairs <= blocks*threads); // - GPU err = hipMalloc((void**)&device_data[0], n*sizeof(T)); err = hipMalloc((void**)&device_data[1], n*sizeof(T)); hipMemcpy(device_data[0], data, n*sizeof(T), hipMemcpyHostToDevice); // - // , // 1 // err = hipMalloc((void**)&device_index, (2*block_pairs+1)*sizeof(int)); // hipLaunchKernelGGL(( global_oddeven_spliter<T>) , dim3(1), dim3(1) , 0, 0, device_index, n, block_pairs ); // hipLaunchKernelGGL(( global_oddeven_preworker<T>) , dim3(blocks), dim3(2*threads) , 0, 0, device_data[0], device_index, block_pairs, direction); for (int i = 0; i < 2*block_pairs; i++ ) { // // hipLaunchKernelGGL(( global_oddeven_worker<T>) , dim3(blocks), dim3(threads) , 0, 0, device_data[i&1],device_data[1-(i&1)], device_index, block_pairs, (i&1), direction); } // hipMemcpy(data, device_data[1], n*sizeof(T), hipMemcpyDeviceToHost); // hipFree(device_data[1]); hipFree(device_data[0]); hipFree(device_index); err = err; } template<class T> __global__ void global_oddeven_spliter( int * index, int n, int block_pairs) { index[2*block_pairs] = n; for(int i = 2*block_pairs; i > 0 ; i-- ) { index[i-1] = index[i] - (int)(index[i] / i) ; } } template<class T> __global__ void global_oddeven_preworker( T * data, int * index, int block_pairs, int direction) { // int id = blockDim.x*blockIdx.x + threadIdx.x; if (id < 2*block_pairs) { int n = index[2*block_pairs]; // id int start = index[id]; int len = index[id+1]-index[id]; // fn_non_parallel_sort(data, start, len, n, direction); } } // // // // // // // // // template<class T> __global__ void global_oddeven_worker( T * data0, T * data1, int * index, int block_pairs, int parity, int direction) { // int id = blockDim.x*blockIdx.x + threadIdx.x; if (id < block_pairs) { // - left right // // int left = (2*id+parity) % (2*block_pairs); int right = (2*id+1+parity) % (2*block_pairs); int index_left = index[left]; int index_right = index[right]; int size_left = index[left+1]-index[left]; int size_right = index[right+1]-index[right]; int start0 = (left<right)?index_left:index_right; int start1 = (left>right)?index_left:index_right; int total0 = (left<right)?size_left:size_right; int total1 = (left>right)?size_left:size_right; // while(size_left > 0 && size_right > 0) { int value = direction*fn_comparer(&data0[index_left+size_left-1],&data0[index_right+size_right-1]); if (value < 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_right+(--size_right)],1); } else { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_left+(--size_left)],1); } if (total1 > 0) total1--; else total0--; } while(size_left > 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_left+(--size_left)],1); if (total1 > 0) total1--; else total0--; } while(size_right > 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_right+(--size_right)],1); if (total1 > 0) total1--; else total0--; } // if (left<right) index[right] = (index[left]+index[right+1])>>1; } } // template<class T> __device__ void device_exchange(T *x, T *y, int count) { for(int i = 0; i < count ; i++ ) { T ch = x[i] ; x[i] = y[i] ; y[i] = ch; } } // template<class T> __device__ void device_copy(T *x, T *y, int count) { for(int i = 0; i < count ; i++ ) { x[i] = y[i] ; } } // // len index template<class T> __device__ int device_indexator(T *x, int index, int len) { assert(index+len <= 
sizeof(T)); return (int)((((*x) >> index) + (1 << (8 * sizeof(T)-index))) & ((1 << len) - 1)); } // x long // comparison function which returns a negative integer value if the first argument is less than the second, // a positive integer value if the first argument is greater than the second and zero if the arguments are equal. template<class T> __device__ int device_comparer(T *x, T *y) { if ((*x)<(*y)) return -1; else if ((*x)>(*y)) return 1; else return 0; } ///////////////////////////////////////////////////////////////// // // - n template<class T> __device__ void device_bubble_sort(T *data, int index, int len, int n, int direction) { if (index+len <= n) { for(int i = index ; i < index+len-1 ; i++ ) { for(int j = i + 1 ; j < index+len ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } } else { for(int i = 0 ; i < ((index+len) % n) ; i++ ) { for(int j = i + 1 ; j <= ((index+len)%n) ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } for(int j = index ; j < n ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } for(int i = index ; i < n-1 ; i++ ) { for(int j = i + 1 ; j < n ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } } } int main(int argc, char* argv[]) { int n; char *inputFileName; char *outputFileName; int gridSize = 0; int blockSize = 0; std::cout << title << std::endl; // Find/set the device. int device_count = 0; hipGetDeviceCount(&device_count); for (int i = 0; i < device_count; ++i) { hipDeviceProp_t properties; hipGetDeviceProperties(&properties, i); std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; } if (argc < 2){ printf("Usage :\t%s [-g <gridSize>] [-b <blockSize>] <inputfile> <outputfile>\n", argv[0]); fflush(stdout); exit(-1); } int argId = 1; for(; argId < argc && argv[argId][0]=='-' ; argId++){ switch(argv[argId][1]){ case 'g': gridSize = atoi(argv[++argId]); break; case 'b': blockSize = atoi(argv[++argId]); break; } } // - inputFileName = argv[argId++]; outputFileName = argv[argId++]; // { FILE *fl = fopen(inputFileName, "r"); n = 0; long v; while (fscanf(fl, "%ld", &v) == 1) n++; fclose(fl); } // n long long *arr = (long *)malloc(n*sizeof(long)); { printf("Title :\t%s\n", title); printf("Description :\t%s\n", description); printf("Array size :\t%d\n", n); printf("Input file name :\t%s\n", inputFileName); printf("Output file name :\t%s\n", outputFileName); printf("Grid Size :\t%d\n", gridSize); printf("Block Size :\t%d\n", blockSize); /* */ /* */ FILE *fl = fopen(inputFileName, "r"); for (int i = 0; i<n; i++) { fscanf(fl, "%ld", &arr[i]); } fclose(fl); } // fn_parallel_sort(gridSize, blockSize, arr, n, 1); /* */ { bool check = true; for (int i = 0; (i < (n - 1)) && check; i++) check = (arr[i] <= arr[i + 1]); printf("Array size :\t%d\n", n); printf("Check :\t%s\n", (check ? "ok" : "fail")); FILE *fl = fopen(outputFileName, "w"); for (int i = 0; i<n; i++) { fprintf(fl, "%ld\n", arr[i]); } fclose(fl); } // free(arr); hipDeviceReset(); exit(0); }
eddd911772a08d4c5ebe2704127c8a08b04036b0.cu
char *title = "odd-even sort"; char *description = "Сортировка четно-нечетными перестановками (odd-even sort)"; /* Для каждой итерации алгоритма операции сравнения-обмена для всех пар элементов независимы и выполняются одновременно. Рассмотрим случай, когда число процессоров равно числу элементов, т.е. p=n - число процессоров (сортируемых элементов). Предположим, что вычислительная система имеет топологию кольца. Пусть элементы ai (i = 1, .. , n), первоначально расположены на процессорах pi (i = 1, ... , n). В нечетной итерации каждый процессор с нечетным номером производит сравнение-обмен своего элемента с элементом, находящимся на процессоре-соседе справа. Аналогично в течение четной итерации каждый процессор с четным номером производит сравнение-обмен своего элемента с элементом правого соседа. На каждой итерации алгоритма нечетные и четные процессоры выполняют шаг сравнения-обмена с их правыми соседями за время Q(1). Общее количество таких итераций – n; поэтому время выполнения параллельной сортировки – Q(n). Когда число процессоров p меньше числа элементов n, то каждый из процессов получает свой блок данных n/p и сортирует его за время Q((n/p)·log(n/p)). Затем процессоры проходят p итераций (р/2 и чётных, и нечётных) и делают сравнивания-разбиения: смежные процессоры передают друг другу свои данные, а внутренне их сортируют (на каждой паре процессоров получаем одинаковые массивы). Затем удвоенный массив делится на 2 части; левый процессор обрабатывает далее только левую часть (с меньшими значениями данных), а правый – только правую (с большими значениями данных). Получаем отсортированный массив после p итераций */ #include <iostream> #include <cstdio> #include <cstdlib> #include <time.h> #include <cuda.h> #include <cuda_runtime.h> #define assert( bool ) template<class T> __device__ void device_exchange(T *x, T *y, int count); template<class T>__device__ void device_copy(T *x, T *y, int count); template<class T> __device__ int device_comparer(T *x, T *y); template<class T> __device__ int device_indexator(T *x, int index, int len); template<class T> __device__ void device_bubble_sort(T *data, int index, int len, int n, int direction); template<class T> __global__ void global_oddeven_spliter(int * index, int n, int block_pairs); template<class T> __global__ void global_oddeven_preworker(T * data, int * index, int block_pairs, int direction); template<class T> __global__ void global_oddeven_worker(T * data, int * index, int block_pairs, int parity, int direction); template<class T> __host__ void host_oddeven_sort(int gridSize, int blockSize, T *data, int n, int direction); //////////////////////////////////////////////////////////////////////////////////////////// // Настроечные аттрибуты // _comparer - функция сравнения двух элементов массива // _indexator - функция определения номера корзины для элемента массива // _non_parallel_sort - фунция сортировки без использования паралельных вычислений // _parallel_sort - фунция сортировки с использованием паралельных вычислений #define fn_comparer device_comparer<long> #define fn_indexator device_indexator<long> #define fn_non_parallel_sort device_bubble_sort<long> #define fn_parallel_sort host_oddeven_sort<long> template<class T> __host__ void host_oddeven_sort(int gridSize, int blockSize, T *data, int n, int direction) { // data - массив данных // n - количество элементов в исходном массиве для сортировки // direction - способ сортировки // -1 означает сортировку по убыванию, // 1 означает сортировку по возрастанию cudaError_t err; T 
*device_data[2]; int * device_index; // Определение оптимального разбиения на подмассивы int block_length = max(1,(int)pow((double)n,0.33333333333)); // Для реализации алгоритма нам потребуется массив состоящий из пар блоков, // то есть количество блоков должно быть кратно 2 // поскольку на одном шаге сортировки все блоки разбиваются на пары int block_pairs = (int)((n+(2*block_length)-1)/(2*block_length)); // Определим оптимальное разбиения на процессы, нити // одна нить в просессе будет сортировать массив длины 2*block_length int blocks = (gridSize > 0)? gridSize : min(max(1,(int)sqrt((double)block_pairs)),255); int threads = (blockSize > 0)? blockSize : (int)((block_pairs+blocks-1)/blocks); assert(n <= 2*block_length*block_pairs); assert(block_pairs <= blocks*threads); // Шаг первый - копируем исходный массив в память GPU err = cudaMalloc((void**)&device_data[0], n*sizeof(T)); err = cudaMalloc((void**)&device_data[1], n*sizeof(T)); cudaMemcpy(device_data[0], data, n*sizeof(T), cudaMemcpyHostToDevice); // Шаг второй - разделим все элементы массива между блоками // соответственно они будут содержать данные разной длины, // и нам понадобится 1 вспомогательный массив // с количеством элементов в предыдущих блоках err = cudaMalloc((void**)&device_index, (2*block_pairs+1)*sizeof(int)); // Равномерно распределим исходные данные между блоками global_oddeven_spliter<T> <<< 1, 1 >>>( device_index, n, block_pairs ); // запускаем параллельные задачи сортировки данных двух соседних блоков global_oddeven_preworker<T> <<< blocks, 2*threads >>>(device_data[0], device_index, block_pairs, direction); for (int i = 0; i < 2*block_pairs; i++ ) { // Запускаем параллельные процессы копирования левых и правых блоков // и сортировки получившихся массивов global_oddeven_worker<T> <<< blocks, threads >>>(device_data[i&1],device_data[1-(i&1)], device_index, block_pairs, (i&1), direction); } // Возвращаем результаты в исходный массив cudaMemcpy(data, device_data[1], n*sizeof(T), cudaMemcpyDeviceToHost); // Освобождаем память на устройстве cudaFree(device_data[1]); cudaFree(device_data[0]); cudaFree(device_index); err = err; } template<class T> __global__ void global_oddeven_spliter( int * index, int n, int block_pairs) { index[2*block_pairs] = n; for(int i = 2*block_pairs; i > 0 ; i-- ) { index[i-1] = index[i] - (int)(index[i] / i) ; } } template<class T> __global__ void global_oddeven_preworker( T * data, int * index, int block_pairs, int direction) { // Получаем идентификатор нити int id = blockDim.x*blockIdx.x + threadIdx.x; if (id < 2*block_pairs) { int n = index[2*block_pairs]; // Сортируем массив id int start = index[id]; int len = index[id+1]-index[id]; // Запускаем обычный алгоритм для сортировки части массива fn_non_parallel_sort(data, start, len, n, direction); } } // Рабочий процесс // Параметры // адрес исходного массива данных // адрес результирующего массива данных // адрес массива индексов блоков данных // Количество пар блоков // Чётность операции // Размер одного элемента // Направление сортировки template<class T> __global__ void global_oddeven_worker( T * data0, T * data1, int * index, int block_pairs, int parity, int direction) { // Получаем идентификатор нити int id = blockDim.x*blockIdx.x + threadIdx.x; if (id < block_pairs) { // Работаем с следующей парой блоков - left и right // Они могут идти последовательно друг за другом // либо быть разнесены в начало и конец массива int left = (2*id+parity) % (2*block_pairs); int right = (2*id+1+parity) % (2*block_pairs); int index_left = index[left]; 
int index_right = index[right]; int size_left = index[left+1]-index[left]; int size_right = index[right+1]-index[right]; int start0 = (left<right)?index_left:index_right; int start1 = (left>right)?index_left:index_right; int total0 = (left<right)?size_left:size_right; int total1 = (left>right)?size_left:size_right; // Запускаем алгоритм для слияния отсортированных частей массива while(size_left > 0 && size_right > 0) { int value = direction*fn_comparer(&data0[index_left+size_left-1],&data0[index_right+size_right-1]); if (value < 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_right+(--size_right)],1); } else { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_left+(--size_left)],1); } if (total1 > 0) total1--; else total0--; } while(size_left > 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_left+(--size_left)],1); if (total1 > 0) total1--; else total0--; } while(size_right > 0) { device_copy<T>(&data1[(total1>0)?(start1+total1-1):(start0+total0-1)],&data0[index_right+(--size_right)],1); if (total1 > 0) total1--; else total0--; } // Делим данные двух блоков между собой if (left<right) index[right] = (index[left]+index[right+1])>>1; } } // Перестановка двух блоков в памяти устройства template<class T> __device__ void device_exchange(T *x, T *y, int count) { for(int i = 0; i < count ; i++ ) { T ch = x[i] ; x[i] = y[i] ; y[i] = ch; } } // Копирование одного участка памяти в другой template<class T> __device__ void device_copy(T *x, T *y, int count) { for(int i = 0; i < count ; i++ ) { x[i] = y[i] ; } } // Определение номера карзины // Формируется положмтельное число из len бит с позиции index template<class T> __device__ int device_indexator(T *x, int index, int len) { assert(index+len <= sizeof(T)); return (int)((((*x) >> index) + (1 << (8 * sizeof(T)-index))) & ((1 << len) - 1)); } // Функция сравнения данных xранимых в памяти как целых чисел типа long // comparison function which returns ​a negative integer value if the first argument is less than the second, // a positive integer value if the first argument is greater than the second and zero if the arguments are equal. 
template<class T> __device__ int device_comparer(T *x, T *y) { if ((*x)<(*y)) return -1; else if ((*x)>(*y)) return 1; else return 0; } ///////////////////////////////////////////////////////////////// // Пузырьковая сортировка части массива // Особенность - поддерживат циклическую адресацию в массиве длины n template<class T> __device__ void device_bubble_sort(T *data, int index, int len, int n, int direction) { if (index+len <= n) { for(int i = index ; i < index+len-1 ; i++ ) { for(int j = i + 1 ; j < index+len ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } } else { for(int i = 0 ; i < ((index+len) % n) ; i++ ) { for(int j = i + 1 ; j <= ((index+len)%n) ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } for(int j = index ; j < n ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } for(int i = index ; i < n-1 ; i++ ) { for(int j = i + 1 ; j < n ; j++ ) { int value = direction*fn_comparer(&data[i],&data[j]); if (value > 0) device_exchange<T>(&data[i],&data[j],1); } } } } int main(int argc, char* argv[]) { int n; char *inputFileName; char *outputFileName; int gridSize = 0; int blockSize = 0; std::cout << title << std::endl; // Find/set the device. int device_count = 0; cudaGetDeviceCount(&device_count); for (int i = 0; i < device_count; ++i) { cudaDeviceProp properties; cudaGetDeviceProperties(&properties, i); std::cout << "Running on GPU " << i << " (" << properties.name << ")" << std::endl; } if (argc < 2){ printf("Usage :\t%s [-g <gridSize>] [-b <blockSize>] <inputfile> <outputfile>\n", argv[0]); fflush(stdout); exit(-1); } int argId = 1; for(; argId < argc && argv[argId][0]=='-' ; argId++){ switch(argv[argId][1]){ case 'g': gridSize = atoi(argv[++argId]); break; case 'b': blockSize = atoi(argv[++argId]); break; } } // Получаем параметры - имена файлов inputFileName = argv[argId++]; outputFileName = argv[argId++]; // Подсчитываем количество элементов в файле { FILE *fl = fopen(inputFileName, "r"); n = 0; long v; while (fscanf(fl, "%ld", &v) == 1) n++; fclose(fl); } // Создаём массив длины n чисел типа long long *arr = (long *)malloc(n*sizeof(long)); { printf("Title :\t%s\n", title); printf("Description :\t%s\n", description); printf("Array size :\t%d\n", n); printf("Input file name :\t%s\n", inputFileName); printf("Output file name :\t%s\n", outputFileName); printf("Grid Size :\t%d\n", gridSize); printf("Block Size :\t%d\n", blockSize); /* Заполняем массив числами */ /* Операция выполняется только на ведущем процессе */ FILE *fl = fopen(inputFileName, "r"); for (int i = 0; i<n; i++) { fscanf(fl, "%ld", &arr[i]); } fclose(fl); } // Сортируем массив по возрастанию fn_parallel_sort(gridSize, blockSize, arr, n, 1); /* Проверяем и выводим результаты */ { bool check = true; for (int i = 0; (i < (n - 1)) && check; i++) check = (arr[i] <= arr[i + 1]); printf("Array size :\t%d\n", n); printf("Check :\t%s\n", (check ? "ok" : "fail")); FILE *fl = fopen(outputFileName, "w"); for (int i = 0; i<n; i++) { fprintf(fl, "%ld\n", arr[i]); } fclose(fl); } // Высвобождаем массив free(arr); cudaDeviceReset(); exit(0); }
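The header comment of this file describes odd-even transposition sort: with p = n processors, n alternating odd/even phases of compare-exchanges with the right neighbour; with p < n, a local block sort followed by p compare-split iterations. Below is a host-only reference of the p = n scheme that can be used to sanity-check sorted output on small arrays. It is a sketch, not a substitute for the GPU path; the function name is illustrative, and only the direction convention of fn_parallel_sort (1 ascending, -1 descending) is mirrored, via the same sign test as device_comparer.

#include <cstdio>
#include <cstdlib>

static void odd_even_transposition_sort(long *a, int n, int direction) {
    for (int phase = 0; phase < n; ++phase) {
        // phase parity selects which neighbour pairs (i, i+1) are compared this round
        for (int i = phase & 1; i + 1 < n; i += 2) {
            long cmp = (a[i] > a[i + 1]) ? 1 : (a[i] < a[i + 1]) ? -1 : 0;
            if (direction * cmp > 0) { long t = a[i]; a[i] = a[i + 1]; a[i + 1] = t; }
        }
    }
}

int main() {
    const int n = 16;
    long a[n];
    for (int i = 0; i < n; ++i) a[i] = rand() % 100;
    odd_even_transposition_sort(a, n, 1);   // 1 = ascending, as in fn_parallel_sort
    for (int i = 0; i < n; ++i) printf("%ld ", a[i]);
    printf("\n");
    return 0;
}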
77038ece11cb46d2aa55b12f59878ecd63d77e76.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "_adam64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int t = 1; double eps = 1; double b1 = 1; double b2 = 1; double *fstm = NULL; hipMalloc(&fstm, XSIZE*YSIZE); double *scndm = NULL; hipMalloc(&scndm, XSIZE*YSIZE); double *dw = NULL; hipMalloc(&dw, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( _adam64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,t,eps,b1,b2,fstm,scndm,dw); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( _adam64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,t,eps,b1,b2,fstm,scndm,dw); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( _adam64), dim3(gridBlock),dim3(threadBlock), 0, 0, n,t,eps,b1,b2,fstm,scndm,dw); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
77038ece11cb46d2aa55b12f59878ecd63d77e76.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "_adam64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; int t = 1; double eps = 1; double b1 = 1; double b2 = 1; double *fstm = NULL; cudaMalloc(&fstm, XSIZE*YSIZE); double *scndm = NULL; cudaMalloc(&scndm, XSIZE*YSIZE); double *dw = NULL; cudaMalloc(&dw, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); _adam64<<<gridBlock,threadBlock>>>(n,t,eps,b1,b2,fstm,scndm,dw); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { _adam64<<<gridBlock,threadBlock>>>(n,t,eps,b1,b2,fstm,scndm,dw); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { _adam64<<<gridBlock,threadBlock>>>(n,t,eps,b1,b2,fstm,scndm,dw); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
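The harness above warms up with 10 launches and then times 1000 launches with steady_clock. Kernel launches are asynchronous, so a host-side clock mainly bounds enqueue time unless the stream is synchronized before the stop timestamp. Below is a sketch of the same warm-up / repeat / measure pattern using cudaEvent_t; the trivial kernel named dummy stands in for _adam64, whose source is not shown here.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy(double *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 0.9 + 0.1;
}

int main() {
    const int n = 1 << 20;
    double *d;
    cudaMalloc(&d, n * sizeof(double));
    cudaMemset(d, 0, n * sizeof(double));
    dim3 block(256), grid((n + 255) / 256);

    for (int i = 0; i < 10; ++i) dummy<<<grid, block>>>(d, n);   // warm-up

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) dummy<<<grid, block>>>(d, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);          // wait until the last kernel has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("total: %.3f ms for 1000 launches\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}

cudaEventElapsedTime reports the milliseconds between the two recorded events, and synchronizing on the stop event guarantees the interval covers kernel execution rather than just the launch calls.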
8f046274137fab65f18c660823280c1fe2dc2e4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 25-Oct-2011 14:59:20 // // user function __device__ #include "save_soln.h" // CUDA kernel function __global__ void op_cuda_save_soln( float *arg0, float *arg1, int offset_s, int set_size ) { float arg0_l[4]; float arg1_l[4]; int tid = threadIdx.x%OP_WARPSIZE; extern __shared__ char shared[]; char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE); // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { int offset = n - tid; int nelems = MIN(OP_WARPSIZE,set_size-offset); // copy data into shared memory, then into local for (int m=0; m<4; m++) ((float *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4]; for (int m=0; m<4; m++) arg0_l[m] = ((float *)arg_s)[m+tid*4]; // user-supplied kernel call save_soln( arg0_l, arg1_l ); // copy back into shared memory, then to device for (int m=0; m<4; m++) ((float *)arg_s)[m+tid*4] = arg1_l[m]; for (int m=0; m<4; m++) arg1[tid+m*nelems+offset*4] = ((float *)arg_s)[tid+m*nelems]; } } // host stub function void op_par_loop_save_soln(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ if (OP_diags>2) { printf(" kernel routine w/o indirection: save_soln \n"); } // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // work out shared memory requirements per element int nshared = 0; nshared = MAX(nshared,sizeof(float)*4); nshared = MAX(nshared,sizeof(float)*4); // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = nshared*nthread; hipLaunchKernelGGL(( op_cuda_save_soln), dim3(nblocks),dim3(nthread),nshared, 0, (float *) arg0.data_d, (float *) arg1.data_d, offset_s, set->size ); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_save_soln execution failed\n"); // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(0); OP_kernels[0].name = name; OP_kernels[0].count += 1; OP_kernels[0].time += wall_t2 - wall_t1; OP_kernels[0].transfer += (float)set->size * arg0.size; OP_kernels[0].transfer += (float)set->size * arg1.size; }
8f046274137fab65f18c660823280c1fe2dc2e4a.cu
// // auto-generated by op2.m on 25-Oct-2011 14:59:20 // // user function __device__ #include "save_soln.h" // CUDA kernel function __global__ void op_cuda_save_soln( float *arg0, float *arg1, int offset_s, int set_size ) { float arg0_l[4]; float arg1_l[4]; int tid = threadIdx.x%OP_WARPSIZE; extern __shared__ char shared[]; char *arg_s = shared + offset_s*(threadIdx.x/OP_WARPSIZE); // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { int offset = n - tid; int nelems = MIN(OP_WARPSIZE,set_size-offset); // copy data into shared memory, then into local for (int m=0; m<4; m++) ((float *)arg_s)[tid+m*nelems] = arg0[tid+m*nelems+offset*4]; for (int m=0; m<4; m++) arg0_l[m] = ((float *)arg_s)[m+tid*4]; // user-supplied kernel call save_soln( arg0_l, arg1_l ); // copy back into shared memory, then to device for (int m=0; m<4; m++) ((float *)arg_s)[m+tid*4] = arg1_l[m]; for (int m=0; m<4; m++) arg1[tid+m*nelems+offset*4] = ((float *)arg_s)[tid+m*nelems]; } } // host stub function void op_par_loop_save_soln(char const *name, op_set set, op_arg arg0, op_arg arg1 ){ if (OP_diags>2) { printf(" kernel routine w/o indirection: save_soln \n"); } // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timers(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_0 int nthread = OP_BLOCK_SIZE_0; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // work out shared memory requirements per element int nshared = 0; nshared = MAX(nshared,sizeof(float)*4); nshared = MAX(nshared,sizeof(float)*4); // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = nshared*nthread; op_cuda_save_soln<<<nblocks,nthread,nshared>>>( (float *) arg0.data_d, (float *) arg1.data_d, offset_s, set->size ); cutilSafeCall(cudaThreadSynchronize()); cutilCheckMsg("op_cuda_save_soln execution failed\n"); // update kernel record op_timers(&cpu_t2, &wall_t2); op_timing_realloc(0); OP_kernels[0].name = name; OP_kernels[0].count += 1; OP_kernels[0].time += wall_t2 - wall_t1; OP_kernels[0].transfer += (float)set->size * arg0.size; OP_kernels[0].transfer += (float)set->size * arg1.size; }
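The generated kernel above stages each warp's 4-float element data through shared memory so that global loads and stores stay coalesced (thread-major order) while the user kernel save_soln sees a per-thread array (element-major order). Below is a stripped-down sketch of just that staging step, assuming 4 floats per set element and a 64-thread block; the macro names WARPSIZE and DIM, the kernel name staged_copy, and the pass-through "user kernel" are all local stand-ins, and none of the real OP2 plan machinery is reproduced.

#include <cstdio>
#include <cuda_runtime.h>

#define WARPSIZE 32
#define DIM 4

__global__ void staged_copy(const float *in, float *out, int set_size) {
    extern __shared__ float stage[];                       // DIM * WARPSIZE floats per warp
    int tid = threadIdx.x % WARPSIZE;
    float *warp_stage = stage + DIM * WARPSIZE * (threadIdx.x / WARPSIZE);

    for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < set_size; n += blockDim.x * gridDim.x) {
        int offset = n - tid;
        int nelems = min(WARPSIZE, set_size - offset);

        // coalesced stage-in: consecutive threads read consecutive floats
        for (int m = 0; m < DIM; ++m)
            warp_stage[tid + m * nelems] = in[tid + m * nelems + offset * DIM];

        // per-thread view: thread tid owns elements [tid*DIM .. tid*DIM + DIM-1]
        float local[DIM];
        for (int m = 0; m < DIM; ++m) local[m] = warp_stage[m + tid * DIM];

        // "user kernel": copy through, then stage out the same way in reverse
        for (int m = 0; m < DIM; ++m) warp_stage[m + tid * DIM] = local[m];
        for (int m = 0; m < DIM; ++m)
            out[tid + m * nelems + offset * DIM] = warp_stage[tid + m * nelems];
    }
}

int main() {
    const int set_size = 256;
    float h[set_size * DIM], r[set_size * DIM];
    for (int i = 0; i < set_size * DIM; ++i) h[i] = (float)i;
    float *din, *dout;
    cudaMalloc(&din, sizeof(h)); cudaMalloc(&dout, sizeof(r));
    cudaMemcpy(din, h, sizeof(h), cudaMemcpyHostToDevice);
    int nthread = 64, nblock = 2;
    size_t shmem = DIM * WARPSIZE * (nthread / WARPSIZE) * sizeof(float);
    staged_copy<<<nblock, nthread, shmem>>>(din, dout, set_size);
    cudaMemcpy(r, dout, sizeof(r), cudaMemcpyDeviceToHost);
    printf("r[1000] = %g (expect %g)\n", r[1000], h[1000]);
    cudaFree(din); cudaFree(dout);
    return 0;
}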
1ab32c5386232a73488bb650fd7300ce6d2b08f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm #include <assert.h> #include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.cuh" //////////////////////////////////////////////////////////////////////////////// // Monolithic Bacther's sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// __global__ void oddEvenMergeSortShared( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir ) { //Shared memory storage for one or more small vectors __shared__ uint s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size <= arrayLength; size <<= 1) { uint stride = size / 2; uint offset = threadIdx.x & (stride - 1); { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); stride >>= 1; } for (; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); if (offset >= stride) Comparator( s_key[pos - stride], s_val[pos - stride], s_key[pos + 0], s_val[pos + 0], dir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Odd-even merge sort iteration kernel // for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// __global__ void oddEvenMergeGlobal( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir ) { uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x; //Odd-even merge uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); if (stride < size / 2) { uint offset = global_comparatorI & ((size / 2) - 1); if (offset >= stride) { uint keyA = d_SrcKey[pos - stride]; uint valA = d_SrcVal[pos - stride]; uint keyB = d_SrcKey[pos + 0]; uint valB = d_SrcVal[pos + 0]; Comparator( keyA, valA, keyB, valB, dir ); d_DstKey[pos - stride] = keyA; d_DstVal[pos - stride] = valA; d_DstKey[pos + 0] = keyB; d_DstVal[pos + 0] = valB; } } else { uint keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; 
uint keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator( keyA, valA, keyB, valB, dir ); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// //Helper function extern "C" uint factorRadix2(uint *log2L, uint L); extern "C" void oddEvenMergeSort( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { //Nothing to sort if (arrayLength < 2) return; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert(SHARED_SIZE_LIMIT % arrayLength == 0); hipLaunchKernelGGL(( oddEvenMergeSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); } else { hipLaunchKernelGGL(( oddEvenMergeSortShared), dim3(blockCount), dim3(threadCount), 0, 0, d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) { //Unlike with bitonic sort, combining bitonic merge steps with //stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there are //dependencies between data elements crossing the SHARED_SIZE_LIMIT borders hipLaunchKernelGGL(( oddEvenMergeGlobal), dim3((batchSize * arrayLength) / 512), dim3(256), 0, 0, d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir); } } }
1ab32c5386232a73488bb650fd7300ce6d2b08f0.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ //Based on http://www.iti.fh-flensburg.de/lang/algorithmen/sortieren/networks/oemen.htm #include <assert.h> #include <helper_cuda.h> #include "sortingNetworks_common.h" #include "sortingNetworks_common.cuh" //////////////////////////////////////////////////////////////////////////////// // Monolithic Bacther's sort kernel for short arrays fitting into shared memory //////////////////////////////////////////////////////////////////////////////// __global__ void oddEvenMergeSortShared( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint dir ) { //Shared memory storage for one or more small vectors __shared__ uint s_key[SHARED_SIZE_LIMIT]; __shared__ uint s_val[SHARED_SIZE_LIMIT]; //Offset to the beginning of subbatch and load data d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x; s_key[threadIdx.x + 0] = d_SrcKey[ 0]; s_val[threadIdx.x + 0] = d_SrcVal[ 0]; s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)]; s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)]; for (uint size = 2; size <= arrayLength; size <<= 1) { uint stride = size / 2; uint offset = threadIdx.x & (stride - 1); { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); Comparator( s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], dir ); stride >>= 1; } for (; stride > 0; stride >>= 1) { __syncthreads(); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); if (offset >= stride) Comparator( s_key[pos - stride], s_val[pos - stride], s_key[pos + 0], s_val[pos + 0], dir ); } } __syncthreads(); d_DstKey[ 0] = s_key[threadIdx.x + 0]; d_DstVal[ 0] = s_val[threadIdx.x + 0]; d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)]; } //////////////////////////////////////////////////////////////////////////////// // Odd-even merge sort iteration kernel // for large arrays (not fitting into shared memory) //////////////////////////////////////////////////////////////////////////////// __global__ void oddEvenMergeGlobal( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint arrayLength, uint size, uint stride, uint dir ) { uint global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x; //Odd-even merge uint pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1)); if (stride < size / 2) { uint offset = global_comparatorI & ((size / 2) - 1); if (offset >= stride) { uint keyA = d_SrcKey[pos - stride]; uint valA = d_SrcVal[pos - stride]; uint keyB = d_SrcKey[pos + 0]; uint valB = d_SrcVal[pos + 0]; Comparator( keyA, valA, keyB, valB, dir ); d_DstKey[pos - stride] = keyA; d_DstVal[pos - stride] = valA; d_DstKey[pos + 0] = keyB; d_DstVal[pos + 0] = valB; } } else { uint keyA = d_SrcKey[pos + 0]; uint valA = d_SrcVal[pos + 0]; uint keyB = d_SrcKey[pos + stride]; uint valB = d_SrcVal[pos + stride]; Comparator( 
keyA, valA, keyB, valB, dir ); d_DstKey[pos + 0] = keyA; d_DstVal[pos + 0] = valA; d_DstKey[pos + stride] = keyB; d_DstVal[pos + stride] = valB; } } //////////////////////////////////////////////////////////////////////////////// // Interface function //////////////////////////////////////////////////////////////////////////////// //Helper function extern "C" uint factorRadix2(uint *log2L, uint L); extern "C" void oddEvenMergeSort( uint *d_DstKey, uint *d_DstVal, uint *d_SrcKey, uint *d_SrcVal, uint batchSize, uint arrayLength, uint dir ) { //Nothing to sort if (arrayLength < 2) return; //Only power-of-two array lengths are supported by this implementation uint log2L; uint factorizationRemainder = factorRadix2(&log2L, arrayLength); assert(factorizationRemainder == 1); dir = (dir != 0); uint blockCount = (batchSize * arrayLength) / SHARED_SIZE_LIMIT; uint threadCount = SHARED_SIZE_LIMIT / 2; if (arrayLength <= SHARED_SIZE_LIMIT) { assert(SHARED_SIZE_LIMIT % arrayLength == 0); oddEvenMergeSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, arrayLength, dir); } else { oddEvenMergeSortShared<<<blockCount, threadCount>>>(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, SHARED_SIZE_LIMIT, dir); for (uint size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1) for (unsigned stride = size / 2; stride > 0; stride >>= 1) { //Unlike with bitonic sort, combining bitonic merge steps with //stride = [SHARED_SIZE_LIMIT / 2 .. 1] seems to be impossible as there are //dependencies between data elements crossing the SHARED_SIZE_LIMIT borders oddEvenMergeGlobal<<<(batchSize * arrayLength) / 512, 256>>>(d_DstKey, d_DstVal, d_DstKey, d_DstVal, arrayLength, size, stride, dir); } } }
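A minimal host-side driver for the oddEvenMergeSort() entry point above — a sketch only, assuming the stock CUDA-samples environment: checkCudaErrors() from helper_cuda.h, and uint plus the extern "C" prototype coming from sortingNetworks_common.h. Per the asserts in the interface function, N must be a power of two and batch * N a multiple of SHARED_SIZE_LIMIT; dir is the sort-direction flag passed straight through to the comparators.

// Sketch: sort `batch` independent key/value arrays of N uint elements each.
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "sortingNetworks_common.h"

void sortKeyValueBatch(uint *h_Key, uint *h_Val, uint batch, uint N, uint dir)
{
    uint *d_SrcKey, *d_SrcVal, *d_DstKey, *d_DstVal;
    const size_t bytes = (size_t)batch * N * sizeof(uint);

    checkCudaErrors(cudaMalloc((void **)&d_SrcKey, bytes));
    checkCudaErrors(cudaMalloc((void **)&d_SrcVal, bytes));
    checkCudaErrors(cudaMalloc((void **)&d_DstKey, bytes));
    checkCudaErrors(cudaMalloc((void **)&d_DstVal, bytes));

    checkCudaErrors(cudaMemcpy(d_SrcKey, h_Key, bytes, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(d_SrcVal, h_Val, bytes, cudaMemcpyHostToDevice));

    // Sorts each length-N segment independently; results land in the Dst buffers.
    oddEvenMergeSort(d_DstKey, d_DstVal, d_SrcKey, d_SrcVal, batch, N, dir);
    checkCudaErrors(cudaDeviceSynchronize());

    checkCudaErrors(cudaMemcpy(h_Key, d_DstKey, bytes, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(h_Val, d_DstVal, bytes, cudaMemcpyDeviceToHost));

    checkCudaErrors(cudaFree(d_SrcKey));
    checkCudaErrors(cudaFree(d_SrcVal));
    checkCudaErrors(cudaFree(d_DstKey));
    checkCudaErrors(cudaFree(d_DstVal));
}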
e3809eaf1ceb9738096548ddae225d97a11e6f29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> #include "Error.h" __global__ void cubeKernel(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx]= f * f * f; } void onDevice(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){ // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory HANDLER_ERROR_ERR(hipMalloc((void**) &d_in, ARRAY_BYTES)); HANDLER_ERROR_ERR(hipMalloc((void**) &d_out, ARRAY_BYTES)); // transfer the array to the GPU HANDLER_ERROR_ERR(hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice)); // launch the kernel hipLaunchKernelGGL(( cubeKernel), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in); HANDLER_ERROR_MSG( "Kernel Panic!!!" ); // copy back the result array to the CPU HANDLER_ERROR_ERR(hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost)); // free GPU memory pointers HANDLER_ERROR_ERR(hipFree(d_in)); HANDLER_ERROR_ERR(hipFree(d_out)); } void test(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){ // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { assert( h_out[i] == (h_in[i] * h_in[i] * h_in[i]) ); printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } printf("-: successful execution :-\n"); } void onHost(){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // declare CPU memory pointers float *h_in; float *h_out; // allocate CPU memory h_in = (float*)malloc(ARRAY_BYTES); h_out = (float*)malloc(ARRAY_BYTES); for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } // call the kernel onDevice(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES); test(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES); // free CPU memory pointers free(h_in); free(h_out); } int main(int argc, char ** argv) { onHost(); return 0; }
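The substantive change hipify makes in this file is the kernel launch: HIP replaces the triple-chevron syntax with the hipLaunchKernelGGL macro, whose argument order is kernel, grid, block, dynamic-shared-memory bytes, stream, then the kernel arguments. For the launch above the two forms correspond as:

// CUDA:  cubeKernel<<<1, ARRAY_SIZE>>>(d_out, d_in);
// HIP:   hipLaunchKernelGGL(cubeKernel, dim3(1), dim3(ARRAY_SIZE), 0 /*sharedMem*/, 0 /*stream*/, d_out, d_in);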
e3809eaf1ceb9738096548ddae225d97a11e6f29.cu
#include <stdio.h> #include <assert.h> #include "Error.h" __global__ void cubeKernel(float * d_out, float * d_in){ int idx = threadIdx.x; float f = d_in[idx]; d_out[idx]= f * f * f; } void onDevice(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){ // declare GPU memory pointers float * d_in; float * d_out; // allocate GPU memory HANDLER_ERROR_ERR(cudaMalloc((void**) &d_in, ARRAY_BYTES)); HANDLER_ERROR_ERR(cudaMalloc((void**) &d_out, ARRAY_BYTES)); // transfer the array to the GPU HANDLER_ERROR_ERR(cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice)); // launch the kernel cubeKernel<<<1, ARRAY_SIZE>>>(d_out, d_in); HANDLER_ERROR_MSG( "Kernel Panic!!!" ); // copy back the result array to the CPU HANDLER_ERROR_ERR(cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost)); // free GPU memory pointers HANDLER_ERROR_ERR(cudaFree(d_in)); HANDLER_ERROR_ERR(cudaFree(d_out)); } void test(float *h_in, float *h_out, int ARRAY_SIZE, int ARRAY_BYTES){ // print out the resulting array for (int i =0; i < ARRAY_SIZE; i++) { assert( h_out[i] == (h_in[i] * h_in[i] * h_in[i]) ); printf("%f", h_out[i]); printf(((i % 4) != 3) ? "\t" : "\n"); } printf("-: successful execution :-\n"); } void onHost(){ const int ARRAY_SIZE = 64; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float); // declare CPU memory pointers float *h_in; float *h_out; // allocate CPU memory h_in = (float*)malloc(ARRAY_BYTES); h_out = (float*)malloc(ARRAY_BYTES); for (int i = 0; i < ARRAY_SIZE; i++) { h_in[i] = float(i); } // call the kernel onDevice(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES); test(h_in, h_out, ARRAY_SIZE, ARRAY_BYTES); // free CPU memory pointers free(h_in); free(h_out); } int main(int argc, char ** argv) { onHost(); return 0; }
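Error.h is not part of this dump, so the HANDLER_ERROR_ERR and HANDLER_ERROR_MSG macros are only used, never defined, here. A plausible, purely illustrative reconstruction (assumed — not the project's actual header) follows the usual cudaError_t-checking pattern:

// Hypothetical Error.h sketch: abort with a message when a runtime call or a kernel launch fails.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Check the return code of a CUDA runtime call.
#define HANDLER_ERROR_ERR(err) do {                                   \
    cudaError_t e_ = (err);                                           \
    if (e_ != cudaSuccess) {                                          \
        fprintf(stderr, "%s in %s at line %d\n",                      \
                cudaGetErrorString(e_), __FILE__, __LINE__);          \
        exit(EXIT_FAILURE);                                           \
    }                                                                 \
} while (0)

// Check for an asynchronous error left behind by the last kernel launch.
#define HANDLER_ERROR_MSG(msg) do {                                   \
    cudaError_t e_ = cudaGetLastError();                              \
    if (e_ != cudaSuccess) {                                          \
        fprintf(stderr, "%s: %s in %s at line %d\n",                  \
                (msg), cudaGetErrorString(e_), __FILE__, __LINE__);   \
        exit(EXIT_FAILURE);                                           \
    }                                                                 \
} while (0)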
c1c67181472617b4521585ea7ee453f671625930.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "preprocess_kernel.cuh" namespace CUDAKernel{ Norm Norm::mean_std(float mean[3], float std[3]){ Norm out; out.type = NormType::MeanStd; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; return out; } __global__ void warp_affine_bilinear_and_normalize_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = (m_x1 * dx + m_y1 * dy + m_z1) + 0.5f; float src_y = (m_x2 * dx + m_y2 * dy + m_z2) + 0.5f; float c0, c1, c2; if(src_x < 0 || src_x >= src_width || src_y < 0 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floor(src_y); int x_low = floor(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float* pdst = dst + dy * dst_width + dx * 3; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f; c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f; c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f; } int type = (unsigned int)(norm.type) & 0x000000FF; int channel_order = (unsigned int)(norm.type) & 0x0000FF00; if(channel_order == int(NormType::InvertChannel)){ float t = c2; c2 = c0; c0 = t; } if(type == int(NormType::MeanStd)){ c0 = (c0 / 255.0f - norm.mean[0]) / norm.std[0]; c1 = (c1 / 255.0f - norm.mean[1]) / norm.std[1]; c2 = (c2 / 255.0f - norm.mean[2]) / norm.std[2]; }else if(type == int(NormType::AlphaBeta)){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } static __device__ uint8_t cast(float value){ return value < 0 ? 0 : (value > 255 ? 
255 : value); } static __global__ void convert_nv12_to_bgr_kernel(const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst_bgr, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; int ox = position % width; int oy = position / width; const uint8_t& yvalue = y[oy * linesize + ox]; int offset_uv = (oy >> 1) * linesize + (ox & 0xFFFFFFFE); const uint8_t& u = uv[offset_uv + 0]; const uint8_t& v = uv[offset_uv + 1]; dst_bgr[position * 3 + 0] = 1.164f * (yvalue - 16.0f) + 2.018f * (u - 128.0f); dst_bgr[position * 3 + 1] = 1.164f * (yvalue - 16.0f) - 0.813f * (v - 128.0f) - 0.391f * (u - 128.0f); dst_bgr[position * 3 + 2] = 1.164f * (yvalue - 16.0f) + 1.596f * (v - 128.0f); } ///////////////////////////////////////////////////////////////////////// void convert_nv12_to_bgr_invoke( const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst, hipStream_t stream){ int total = width * height; dim3 grid = CUDATools::grid_dims(total); dim3 block = CUDATools::block_dims(total); hipLaunchKernelGGL(( checkCudaKernel(convert_nv12_to_bgr_kernel), dim3(grid), dim3(block), 0, stream, y, uv, width, height, linesize, dst, total )); } void warp_affine_bilinear_and_normalize( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, hipStream_t stream) { int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs )); } // void resize_bilinear( // uint8_t* src, int src_line_size, int src_width, int src_height, // uint8_t* dst, int dst_line_size, int dst_width, int dst_height, // hipStream_t stream) { // int jobs = dst_width * dst_height; // auto grid = CUDATools::grid_dims(jobs); // auto block = CUDATools::block_dims(jobs); // checkCudaKernel(resize_bilinear_kernel << <grid, block, 0, stream >> > ( // src, src_line_size, src_width, src_height, // dst, dst_line_size, dst_width, dst_height, src_width / (float)dst_width, src_height / (float)dst_height, jobs // )); // } };
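The constants in convert_nv12_to_bgr_kernel are the standard ITU-R BT.601 limited-range ("video range") YUV-to-RGB conversion, with the result stored in BGR order:

R = 1.164 * (Y - 16) + 1.596 * (V - 128)
G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
B = 1.164 * (Y - 16) + 2.018 * (U - 128)

The (ox & 0xFFFFFFFE) index rounds the x coordinate down to an even column and (oy >> 1) halves the row, so each 2x2 block of luma pixels shares one interleaved U/V pair — exactly the NV12 chroma layout.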
c1c67181472617b4521585ea7ee453f671625930.cu
#include "preprocess_kernel.cuh" namespace CUDAKernel{ Norm Norm::mean_std(float mean[3], float std[3]){ Norm out; out.type = NormType::MeanStd; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; return out; } __global__ void warp_affine_bilinear_and_normalize_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; float m_x1 = warp_affine_matrix_2_3[0]; float m_y1 = warp_affine_matrix_2_3[1]; float m_z1 = warp_affine_matrix_2_3[2]; float m_x2 = warp_affine_matrix_2_3[3]; float m_y2 = warp_affine_matrix_2_3[4]; float m_z2 = warp_affine_matrix_2_3[5]; int dx = position % dst_width; int dy = position / dst_width; float src_x = (m_x1 * dx + m_y1 * dy + m_z1) + 0.5f; float src_y = (m_x2 * dx + m_y2 * dy + m_z2) + 0.5f; float c0, c1, c2; if(src_x < 0 || src_x >= src_width || src_y < 0 || src_y >= src_height){ // out of range c0 = const_value_st; c1 = const_value_st; c2 = const_value_st; }else{ int y_low = floor(src_y); int x_low = floor(src_x); int y_high = y_low + 1; int x_high = x_low + 1; uint8_t const_value[] = {const_value_st, const_value_st, const_value_st}; float ly = src_y - y_low; float lx = src_x - x_low; float hy = 1 - ly; float hx = 1 - lx; float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; float* pdst = dst + dy * dst_width + dx * 3; uint8_t* v1 = const_value; uint8_t* v2 = const_value; uint8_t* v3 = const_value; uint8_t* v4 = const_value; if(y_low >= 0){ if (x_low >= 0) v1 = src + y_low * src_line_size + x_low * 3; if (x_high < src_width) v2 = src + y_low * src_line_size + x_high * 3; } if(y_high < src_height){ if (x_low >= 0) v3 = src + y_high * src_line_size + x_low * 3; if (x_high < src_width) v4 = src + y_high * src_line_size + x_high * 3; } c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f; c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f; c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f; } int type = (unsigned int)(norm.type) & 0x000000FF; int channel_order = (unsigned int)(norm.type) & 0x0000FF00; if(channel_order == int(NormType::InvertChannel)){ float t = c2; c2 = c0; c0 = t; } if(type == int(NormType::MeanStd)){ c0 = (c0 / 255.0f - norm.mean[0]) / norm.std[0]; c1 = (c1 / 255.0f - norm.mean[1]) / norm.std[1]; c2 = (c2 / 255.0f - norm.mean[2]) / norm.std[2]; }else if(type == int(NormType::AlphaBeta)){ c0 = c0 * norm.alpha + norm.beta; c1 = c1 * norm.alpha + norm.beta; c2 = c2 * norm.alpha + norm.beta; } int area = dst_width * dst_height; float* pdst_c0 = dst + dy * dst_width + dx; float* pdst_c1 = pdst_c0 + area; float* pdst_c2 = pdst_c1 + area; *pdst_c0 = c0; *pdst_c1 = c1; *pdst_c2 = c2; } static __device__ uint8_t cast(float value){ return value < 0 ? 0 : (value > 255 ? 
255 : value); } static __global__ void convert_nv12_to_bgr_kernel(const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst_bgr, int edge){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= edge) return; int ox = position % width; int oy = position / width; const uint8_t& yvalue = y[oy * linesize + ox]; int offset_uv = (oy >> 1) * linesize + (ox & 0xFFFFFFFE); const uint8_t& u = uv[offset_uv + 0]; const uint8_t& v = uv[offset_uv + 1]; dst_bgr[position * 3 + 0] = 1.164f * (yvalue - 16.0f) + 2.018f * (u - 128.0f); dst_bgr[position * 3 + 1] = 1.164f * (yvalue - 16.0f) - 0.813f * (v - 128.0f) - 0.391f * (u - 128.0f); dst_bgr[position * 3 + 2] = 1.164f * (yvalue - 16.0f) + 1.596f * (v - 128.0f); } ///////////////////////////////////////////////////////////////////////// void convert_nv12_to_bgr_invoke( const uint8_t* y, const uint8_t* uv, int width, int height, int linesize, uint8_t* dst, cudaStream_t stream){ int total = width * height; dim3 grid = CUDATools::grid_dims(total); dim3 block = CUDATools::block_dims(total); checkCudaKernel(convert_nv12_to_bgr_kernel<<<grid, block, 0, stream>>>( y, uv, width, height, linesize, dst, total )); } void warp_affine_bilinear_and_normalize( uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, float* matrix_2_3, uint8_t const_value, const Norm& norm, cudaStream_t stream) { int jobs = dst_width * dst_height; auto grid = CUDATools::grid_dims(jobs); auto block = CUDATools::block_dims(jobs); checkCudaKernel(warp_affine_bilinear_and_normalize_kernel << <grid, block, 0, stream >> > ( src, src_line_size, src_width, src_height, dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs )); } // void resize_bilinear( // uint8_t* src, int src_line_size, int src_width, int src_height, // uint8_t* dst, int dst_line_size, int dst_width, int dst_height, // cudaStream_t stream) { // int jobs = dst_width * dst_height; // auto grid = CUDATools::grid_dims(jobs); // auto block = CUDATools::block_dims(jobs); // checkCudaKernel(resize_bilinear_kernel << <grid, block, 0, stream >> > ( // src, src_line_size, src_width, src_height, // dst, dst_line_size, dst_width, dst_height, src_width / (float)dst_width, src_height / (float)dst_height, jobs // )); // } };
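CUDATools::grid_dims and CUDATools::block_dims are not included in this dump; they are launch-shape helpers that turn a flat job count into a 1-D grid whose overshoot is handled by the `if (position >= edge) return;` guard in each kernel. A minimal sketch of what such helpers typically look like (assumed, not the project's actual code):

#include <cuda_runtime.h>

namespace CUDATools {

    // Assumed fixed 1-D block size; the real project may choose a different value.
    const int GPU_BLOCK_THREADS = 512;

    inline dim3 block_dims(int numJobs) {
        return dim3(numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS);
    }

    inline dim3 grid_dims(int numJobs) {
        int threads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;
        return dim3((numJobs + threads - 1) / threads);   // ceil(numJobs / threads)
    }

} // namespace CUDATools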
6cedaaf2ee642090d133bb81d06529b4c0852c3b.hip
// !!! This is a file automatically generated by hipify!!! #include "timer.h" namespace kmeans { timer::timer() { hipEventCreate(&m_start); hipEventCreate(&m_stop); } timer::~timer() { hipEventDestroy(m_start); hipEventDestroy(m_stop); } void timer::start() { hipEventRecord(m_start, 0); } float timer::stop() { float time; hipEventRecord(m_stop, 0); hipEventSynchronize(m_stop); hipEventElapsedTime(&time, m_start, m_stop); return time; } }
6cedaaf2ee642090d133bb81d06529b4c0852c3b.cu
#include "timer.h" namespace kmeans { timer::timer() { cudaEventCreate(&m_start); cudaEventCreate(&m_stop); } timer::~timer() { cudaEventDestroy(m_start); cudaEventDestroy(m_stop); } void timer::start() { cudaEventRecord(m_start, 0); } float timer::stop() { float time; cudaEventRecord(m_stop, 0); cudaEventSynchronize(m_stop); cudaEventElapsedTime(&time, m_start, m_stop); return time; } }
bea82d5158f54be3b6b2280bcd72b0faa4c609ee.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <functions/sigmoid.cuh> #include <raft/cuda_utils.cuh> #include "test_utils.h" namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(data, len); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; raft::update_device(data, data_h, len, stream); raft::allocate(result, len); raft::allocate(result_ref, len); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; raft::update_device(result_ref, result_ref_h, len, stream); sigmoid(result, data, len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(data)); CUDA_CHECK(hipFree(result)); CUDA_CHECK(hipFree(result_ref)); } protected: SigmoidInputs<T> params; T *data, *result, *result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, Result) { ASSERT_TRUE(raft::devArrMatch( result_ref, result, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(raft::devArrMatch( result_ref, result, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
bea82d5158f54be3b6b2280bcd72b0faa4c609ee.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <functions/sigmoid.cuh> #include <raft/cuda_utils.cuh> #include "test_utils.h" namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(data, len); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; raft::update_device(data, data_h, len, stream); raft::allocate(result, len); raft::allocate(result_ref, len); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; raft::update_device(result_ref, result_ref_h, len, stream); sigmoid(result, data, len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(data)); CUDA_CHECK(cudaFree(result)); CUDA_CHECK(cudaFree(result_ref)); } protected: SigmoidInputs<T> params; T *data, *result, *result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, Result) { ASSERT_TRUE(raft::devArrMatch( result_ref, result, params.len, raft::CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(raft::devArrMatch( result_ref, result, params.len, raft::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
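The hard-coded result_ref_h values are simply the logistic function applied elementwise to data_h:

sigmoid(x) = 1 / (1 + exp(-x))

sigmoid(2.1) ≈ 0.89090318, sigmoid(-4.5) ≈ 0.01098694, sigmoid(-0.34) ≈ 0.41580948, sigmoid(10.0) ≈ 0.9999546

which is what the devArrMatch comparison verifies to within params.tolerance.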
4b77628fd561466ad6e8b625b88593a3993e433d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hiprand/hiprand.h" #include "rocblas.h" extern "C" { #include <stdio.h> #include <time.h> #include <assert.h> #include "network.h" #include "image.h" #include "data.h" #include "utils.h" #include "parser.h" #include "crop_layer.h" #include "connected_layer.h" #include "rnn_layer.h" #include "gru_layer.h" #include "crnn_layer.h" #include "detection_layer.h" #include "region_layer.h" #include "convolutional_layer.h" #include "activation_layer.h" #include "maxpool_layer.h" #include "reorg_layer.h" #include "avgpool_layer.h" #include "normalization_layer.h" #include "batchnorm_layer.h" #include "cost_layer.h" #include "local_layer.h" #include "softmax_layer.h" #include "dropout_layer.h" #include "route_layer.h" #include "shortcut_layer.h" #include "blas.h" } #ifdef OPENCV #include "opencv2/highgui/highgui_c.h" #endif float * get_network_output_gpu_layer(network net, int i); float * get_network_delta_gpu_layer(network net, int i); float * get_network_output_gpu(network net); void forward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; for(i = 0; i < net.n; ++i){ state.index = i; layer l = net.layers[i]; if(l.delta_gpu){ fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1); } l.forward_gpu(l, state); if(net.wait_stream) hipStreamSynchronize(get_cuda_stream()); state.input = l.output_gpu; /* cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) { int j; for (j = 0; j < l.out_c; ++j) { image img = make_image(l.out_w, l.out_h, 3); memcpy(img.data, l.output+ l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); char buff[256]; sprintf(buff, "layer-%d slice-%d", i, j); show_image(img, buff); } cvWaitKey(0); // wait press-key in console cvDestroyAllWindows(); } */ } } void backward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; float * original_input = state.input; float * original_delta = state.delta; for(i = net.n-1; i >= 0; --i){ state.index = i; layer l = net.layers[i]; if (l.stopbackward) break; if(i == 0){ state.input = original_input; state.delta = original_delta; }else{ layer prev = net.layers[i-1]; state.input = prev.output_gpu; state.delta = prev.delta_gpu; } l.backward_gpu(l, state); } } void update_network_gpu(network net) { cuda_set_device(net.gpu_index); int i; int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); for(i = 0; i < net.n; ++i){ layer l = net.layers[i]; l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } } void forward_backward_network_gpu(network net, float *x, float *y) { network_state state; state.index = 0; state.net = net; int x_size = get_network_input_size(net)*net.batch; int y_size = get_network_output_size(net)*net.batch; if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch; if(!*net.input_gpu){ *net.input_gpu = cuda_make_array(x, x_size); *net.truth_gpu = cuda_make_array(y, y_size); }else{ cuda_push_array(*net.input_gpu, x, x_size); cuda_push_array(*net.truth_gpu, y, y_size); } state.input = *net.input_gpu; state.delta = 0; state.truth = *net.truth_gpu; state.train = 1; #ifdef CUDNN_HALF int i; for (i = 0; i < net.n; ++i) { layer l = net.layers[i]; cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16); } #endif forward_network_gpu(net, state); //hipStreamSynchronize(get_cuda_stream()); 
backward_network_gpu(net, state); } float train_network_datum_gpu(network net, float *x, float *y) { *net.seen += net.batch; forward_backward_network_gpu(net, x, y); float error = get_network_cost(net); if (((*net.seen) / net.batch) % net.subdivisions == 0) { update_network_gpu(net); } return error; } typedef struct { network net; data d; float *err; } train_args; void *train_thread(void *ptr) { train_args args = *(train_args*)ptr; free(ptr); cuda_set_device(args.net.gpu_index); *args.err = train_network(args.net, args.d); return 0; } pthread_t train_network_in_thread(network net, data d, float *err) { pthread_t thread; train_args *ptr = (train_args *)calloc(1, sizeof(train_args)); ptr->net = net; ptr->d = d; ptr->err = err; if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed"); return thread; } void pull_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void push_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void update_layer(layer l, network net) { int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } void merge_weights(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1); if (l.scales) { axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1); } } void scale_weights(layer l, float s) { if (l.type == CONVOLUTIONAL) { scal_cpu(l.n, s, l.biases, 1); scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1); if (l.scales) { scal_cpu(l.n, s, l.scales, 1); } } else if(l.type == CONNECTED) { scal_cpu(l.outputs, s, l.biases, 1); scal_cpu(l.outputs*l.inputs, s, l.weights, 1); } } void pull_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.biases_gpu, l.biases, l.outputs); cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void push_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, l.biases, l.outputs); cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void distribute_weights(layer l, layer base) { if(l.type == CONVOLUTIONAL){ 
cuda_push_array(l.biases_gpu, base.biases, l.n); cuda_push_array(l.weights_gpu, base.weights, l.n*l.size*l.size*l.c); if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, base.biases, l.outputs); cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs); } } void merge_updates(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1); if (l.scale_updates) { axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1); } } void distribute_updates(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c); if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs); } } void sync_layer(network *nets, int n, int j) { //printf("Syncing layer %d\n", j); int i; network net = nets[0]; layer base = net.layers[j]; cuda_set_device(net.gpu_index); pull_weights(base); for (i = 1; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; pull_weights(l); merge_weights(l, base); } scale_weights(base, 1./n); for (i = 0; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; distribute_weights(l, base); } //printf("Done syncing layer %d\n", j); } typedef struct{ network *nets; int n; int j; } sync_args; void *sync_layer_thread(void *ptr) { sync_args args = *(sync_args*)ptr; sync_layer(args.nets, args.n, args.j); free(ptr); return 0; } pthread_t sync_layer_in_thread(network *nets, int n, int j) { pthread_t thread; sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args)); ptr->nets = nets; ptr->n = n; ptr->j = j; if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed"); return thread; } void sync_nets(network *nets, int n, int interval) { int j; int layers = nets[0].n; pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t)); *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions; for (j = 0; j < n; ++j){ *nets[j].seen = *nets[0].seen; } for (j = 0; j < layers; ++j) { threads[j] = sync_layer_in_thread(nets, n, j); } for (j = 0; j < layers; ++j) { pthread_join(threads[j], 0); } free(threads); } float train_networks(network *nets, int n, data d, int interval) { int i; int batch = nets[0].batch; int subdivisions = nets[0].subdivisions; assert(batch * subdivisions * n == d.X.rows); pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t)); float *errors = (float *) calloc(n, sizeof(float)); float sum = 0; for(i = 0; i < n; ++i){ data p = get_data_part(d, i, n); threads[i] = train_network_in_thread(nets[i], p, errors + i); } for(i = 0; i < n; ++i){ pthread_join(threads[i], 0); //printf("%f\n", errors[i]); sum += errors[i]; } //hipDeviceSynchronize(); if (get_current_batch(nets[0]) % interval == 0) { printf("Syncing... 
"); fflush(stdout); sync_nets(nets, n, interval); printf("Done!\n"); } //hipDeviceSynchronize(); free(threads); free(errors); return (float)sum/(n); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } float *network_predict_gpu(network net, float *input) { if (net.gpu_index != cuda_get_device()) cuda_set_device(net.gpu_index); int size = get_network_input_size(net) * net.batch; network_state state; state.index = 0; state.net = net; state.input = cuda_make_array(input, size); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu(net, state); float *out = get_network_output_gpu(net); cuda_free(state.input); return out; }
4b77628fd561466ad6e8b625b88593a3993e433d.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include <stdio.h> #include <time.h> #include <assert.h> #include "network.h" #include "image.h" #include "data.h" #include "utils.h" #include "parser.h" #include "crop_layer.h" #include "connected_layer.h" #include "rnn_layer.h" #include "gru_layer.h" #include "crnn_layer.h" #include "detection_layer.h" #include "region_layer.h" #include "convolutional_layer.h" #include "activation_layer.h" #include "maxpool_layer.h" #include "reorg_layer.h" #include "avgpool_layer.h" #include "normalization_layer.h" #include "batchnorm_layer.h" #include "cost_layer.h" #include "local_layer.h" #include "softmax_layer.h" #include "dropout_layer.h" #include "route_layer.h" #include "shortcut_layer.h" #include "blas.h" } #ifdef OPENCV #include "opencv2/highgui/highgui_c.h" #endif float * get_network_output_gpu_layer(network net, int i); float * get_network_delta_gpu_layer(network net, int i); float * get_network_output_gpu(network net); void forward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; for(i = 0; i < net.n; ++i){ state.index = i; layer l = net.layers[i]; if(l.delta_gpu){ fill_ongpu(l.outputs * l.batch, 0, l.delta_gpu, 1); } l.forward_gpu(l, state); if(net.wait_stream) cudaStreamSynchronize(get_cuda_stream()); state.input = l.output_gpu; /* cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); if (l.out_w >= 0 && l.out_h >= 1 && l.c >= 3) { int j; for (j = 0; j < l.out_c; ++j) { image img = make_image(l.out_w, l.out_h, 3); memcpy(img.data, l.output+ l.out_w*l.out_h*j, l.out_w*l.out_h * 1 * sizeof(float)); char buff[256]; sprintf(buff, "layer-%d slice-%d", i, j); show_image(img, buff); } cvWaitKey(0); // wait press-key in console cvDestroyAllWindows(); } */ } } void backward_network_gpu(network net, network_state state) { state.workspace = net.workspace; int i; float * original_input = state.input; float * original_delta = state.delta; for(i = net.n-1; i >= 0; --i){ state.index = i; layer l = net.layers[i]; if (l.stopbackward) break; if(i == 0){ state.input = original_input; state.delta = original_delta; }else{ layer prev = net.layers[i-1]; state.input = prev.output_gpu; state.delta = prev.delta_gpu; } l.backward_gpu(l, state); } } void update_network_gpu(network net) { cuda_set_device(net.gpu_index); int i; int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); for(i = 0; i < net.n; ++i){ layer l = net.layers[i]; l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } } void forward_backward_network_gpu(network net, float *x, float *y) { network_state state; state.index = 0; state.net = net; int x_size = get_network_input_size(net)*net.batch; int y_size = get_network_output_size(net)*net.batch; if(net.layers[net.n-1].truths) y_size = net.layers[net.n-1].truths*net.batch; if(!*net.input_gpu){ *net.input_gpu = cuda_make_array(x, x_size); *net.truth_gpu = cuda_make_array(y, y_size); }else{ cuda_push_array(*net.input_gpu, x, x_size); cuda_push_array(*net.truth_gpu, y, y_size); } state.input = *net.input_gpu; state.delta = 0; state.truth = *net.truth_gpu; state.train = 1; #ifdef CUDNN_HALF int i; for (i = 0; i < net.n; ++i) { layer l = net.layers[i]; cuda_convert_f32_to_f16(l.weights_gpu, l.c*l.n*l.size*l.size, l.weights_gpu16); } #endif forward_network_gpu(net, state); //cudaStreamSynchronize(get_cuda_stream()); backward_network_gpu(net, state); } float train_network_datum_gpu(network net, 
float *x, float *y) { *net.seen += net.batch; forward_backward_network_gpu(net, x, y); float error = get_network_cost(net); if (((*net.seen) / net.batch) % net.subdivisions == 0) { update_network_gpu(net); } return error; } typedef struct { network net; data d; float *err; } train_args; void *train_thread(void *ptr) { train_args args = *(train_args*)ptr; free(ptr); cuda_set_device(args.net.gpu_index); *args.err = train_network(args.net, args.d); return 0; } pthread_t train_network_in_thread(network net, data d, float *err) { pthread_t thread; train_args *ptr = (train_args *)calloc(1, sizeof(train_args)); ptr->net = net; ptr->d = d; ptr->err = err; if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed"); return thread; } void pull_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void push_updates(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.n*l.size*l.size*l.c); if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs); } } void update_layer(layer l, network net) { int update_batch = net.batch*net.subdivisions; float rate = get_current_rate(net); l.t = get_current_batch(net); if(l.update_gpu){ l.update_gpu(l, update_batch, rate, net.momentum, net.decay); } } void merge_weights(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weights, 1, base.weights, 1); if (l.scales) { axpy_cpu(l.n, 1, l.scales, 1, base.scales, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.biases, 1, base.biases, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weights, 1, base.weights, 1); } } void scale_weights(layer l, float s) { if (l.type == CONVOLUTIONAL) { scal_cpu(l.n, s, l.biases, 1); scal_cpu(l.n*l.size*l.size*l.c, s, l.weights, 1); if (l.scales) { scal_cpu(l.n, s, l.scales, 1); } } else if(l.type == CONNECTED) { scal_cpu(l.outputs, s, l.biases, 1); scal_cpu(l.outputs*l.inputs, s, l.weights, 1); } } void pull_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_pull_array(l.biases_gpu, l.biases, l.n); cuda_pull_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_pull_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_pull_array(l.biases_gpu, l.biases, l.outputs); cuda_pull_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void push_weights(layer l) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, l.biases, l.n); cuda_push_array(l.weights_gpu, l.weights, l.n*l.size*l.size*l.c); if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, l.biases, l.outputs); cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs); } } void distribute_weights(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.biases_gpu, base.biases, l.n); cuda_push_array(l.weights_gpu, 
base.weights, l.n*l.size*l.size*l.c); if(base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.biases_gpu, base.biases, l.outputs); cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs); } } void merge_updates(layer l, layer base) { if (l.type == CONVOLUTIONAL) { axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.n*l.size*l.size*l.c, 1, l.weight_updates, 1, base.weight_updates, 1); if (l.scale_updates) { axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1); } } else if(l.type == CONNECTED) { axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1); axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1); } } void distribute_updates(layer l, layer base) { if(l.type == CONVOLUTIONAL){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.n*l.size*l.size*l.c); if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n); } else if(l.type == CONNECTED){ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs); cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs); } } void sync_layer(network *nets, int n, int j) { //printf("Syncing layer %d\n", j); int i; network net = nets[0]; layer base = net.layers[j]; cuda_set_device(net.gpu_index); pull_weights(base); for (i = 1; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; pull_weights(l); merge_weights(l, base); } scale_weights(base, 1./n); for (i = 0; i < n; ++i) { cuda_set_device(nets[i].gpu_index); layer l = nets[i].layers[j]; distribute_weights(l, base); } //printf("Done syncing layer %d\n", j); } typedef struct{ network *nets; int n; int j; } sync_args; void *sync_layer_thread(void *ptr) { sync_args args = *(sync_args*)ptr; sync_layer(args.nets, args.n, args.j); free(ptr); return 0; } pthread_t sync_layer_in_thread(network *nets, int n, int j) { pthread_t thread; sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args)); ptr->nets = nets; ptr->n = n; ptr->j = j; if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed"); return thread; } void sync_nets(network *nets, int n, int interval) { int j; int layers = nets[0].n; pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t)); *nets[0].seen += interval * (n-1) * nets[0].batch * nets[0].subdivisions; for (j = 0; j < n; ++j){ *nets[j].seen = *nets[0].seen; } for (j = 0; j < layers; ++j) { threads[j] = sync_layer_in_thread(nets, n, j); } for (j = 0; j < layers; ++j) { pthread_join(threads[j], 0); } free(threads); } float train_networks(network *nets, int n, data d, int interval) { int i; int batch = nets[0].batch; int subdivisions = nets[0].subdivisions; assert(batch * subdivisions * n == d.X.rows); pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t)); float *errors = (float *) calloc(n, sizeof(float)); float sum = 0; for(i = 0; i < n; ++i){ data p = get_data_part(d, i, n); threads[i] = train_network_in_thread(nets[i], p, errors + i); } for(i = 0; i < n; ++i){ pthread_join(threads[i], 0); //printf("%f\n", errors[i]); sum += errors[i]; } //cudaDeviceSynchronize(); if (get_current_batch(nets[0]) % interval == 0) { printf("Syncing... 
"); fflush(stdout); sync_nets(nets, n, interval); printf("Done!\n"); } //cudaDeviceSynchronize(); free(threads); free(errors); return (float)sum/(n); } float *get_network_output_layer_gpu(network net, int i) { layer l = net.layers[i]; if(l.type != REGION) cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch); return l.output; } float *get_network_output_gpu(network net) { int i; for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break; return get_network_output_layer_gpu(net, i); } float *network_predict_gpu(network net, float *input) { if (net.gpu_index != cuda_get_device()) cuda_set_device(net.gpu_index); int size = get_network_input_size(net) * net.batch; network_state state; state.index = 0; state.net = net; state.input = cuda_make_array(input, size); state.truth = 0; state.train = 0; state.delta = 0; forward_network_gpu(net, state); float *out = get_network_output_gpu(net); cuda_free(state.input); return out; }
a812b596f5be0436e204c545613b7a01e0ad4502.hip
// !!! This is a file automatically generated by hipify!!! /* Implements the sequential cusp vectors. */ #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <hip/hip_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheckHost" /* Allocates space for the vector array on the Host if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheckHost(Vec v) { PetscErrorCode ierr; hipError_t err; PetscScalar *array; Vec_Seq *s; PetscInt n = v->map->n; PetscFunctionBegin; s = (Vec_Seq*)v->data; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (s->array == 0) { ierr = PetscMalloc1(n,&array);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));CHKERRQ(ierr); s->array = array; s->array_allocated = array; err = hipHostRegister(s->array, n*sizeof(PetscScalar),hipHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck" /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheck(Vec v) { hipError_t err; hipStream_t stream; Vec_Seq *s = (Vec_Seq*)v->data; PetscFunctionBegin; // First allocate memory on the GPU if needed if (!v->spptr) { try { v->spptr = new Vec_CUSP; ((Vec_CUSP*)v->spptr)->GPUarray = new CUSPARRAY; ((Vec_CUSP*)v->spptr)->GPUarray->resize((PetscBLASInt)v->map->n); err = hipStreamCreate(&stream);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->stream = stream; ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_FALSE; /* If the array is already allocated, one can register it as (page-locked) mapped. 
This can substantially accelerate data transfer across the PCI Express */ if (s->array) { err = hipHostRegister(s->array, v->map->n*sizeof(PetscScalar),hipHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } v->ops->destroy = VecDestroy_SeqCUSP; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU" /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUSPCopyToGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; hipStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = hipMemcpyAsync(varray->data().get(), *(PetscScalar**)v->data, v->map->n*sizeof(PetscScalar), hipMemcpyHostToDevice, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome" static PetscErrorCode VecCUSPCopyToGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; hipError_t err; PetscScalar *cpuPtr, *gpuPtr; hipStream_t stream; Vec_Seq *s; VecScatterCUSPIndices_PtoP ptop_scatter = (VecScatterCUSPIndices_PtoP)ci->scatter; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { stream=((Vec_CUSP*)v->spptr)->stream; s = (Vec_Seq*)v->data; ierr = PetscLogEventBegin(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); varray = ((Vec_CUSP*)v->spptr)->GPUarray; gpuPtr = varray->data().get() + ptop_scatter->recvLowestIndex; cpuPtr = s->array + ptop_scatter->recvLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = hipMemcpyAsync(gpuPtr, cpuPtr, ptop_scatter->nr*sizeof(PetscScalar), hipMemcpyHostToDevice, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->recvIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->recvIndicesGPU; thrust::copy(thrust::make_permutation_iterator(s->array,indicesCPU->begin()), thrust::make_permutation_iterator(s->array,indicesCPU->end()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin())); #endif // Set the buffer states v->valid_GPU_array = PETSC_CUSP_BOTH; ierr = PetscLogEventEnd(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPU" /* VecCUSPCopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUSPCopyFromGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; hipStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = hipMemcpyAsync(*(PetscScalar**)v->data, varray->data().get(), 
v->map->n*sizeof(PetscScalar), hipMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome" /* Note that this function only copies *some* of the values up from the GPU to CPU, which means that we need recombine the data at some point before using any of the standard functions. We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray where you have to always call in pairs */ PetscErrorCode VecCUSPCopyFromGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; hipError_t err; PetscScalar *cpuPtr, *gpuPtr; hipStream_t stream; Vec_Seq *s; VecScatterCUSPIndices_PtoP ptop_scatter = (VecScatterCUSPIndices_PtoP)ci->scatter; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); stream=((Vec_CUSP*)v->spptr)->stream; varray=((Vec_CUSP*)v->spptr)->GPUarray; s = (Vec_Seq*)v->data; gpuPtr = varray->data().get() + ptop_scatter->sendLowestIndex; cpuPtr = s->array + ptop_scatter->sendLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = hipMemcpyAsync(cpuPtr, gpuPtr, ptop_scatter->ns*sizeof(PetscScalar), hipMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->sendIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->sendIndicesGPU; thrust::copy(thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->end()), thrust::make_permutation_iterator(s->array,indicesCPU->begin())); #endif ierr = VecCUSPRestoreArrayRead(v,&varray);CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP_Private" static PetscErrorCode VecCopy_SeqCUSP_Private(Vec xin,Vec yin) { PetscScalar *ya; const PetscScalar *xa; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(xin); ierr = VecCUSPAllocateCheckHost(yin); if (xin != yin) { ierr = VecGetArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecGetArray(yin,&ya);CHKERRQ(ierr); ierr = PetscMemcpy(ya,xa,xin->map->n*sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecRestoreArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecRestoreArray(yin,&ya);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP_Private" static PetscErrorCode VecSetRandom_SeqCUSP_Private(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscInt n = xin->map->n,i; PetscScalar *xx; PetscFunctionBegin; ierr = VecGetArray(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) {ierr = PetscRandomGetValue(r,&xx[i]);CHKERRQ(ierr);} ierr = VecRestoreArray(xin,&xx);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP_Private" static PetscErrorCode VecDestroy_SeqCUSP_Private(Vec v) { Vec_Seq *vs = (Vec_Seq*)v->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectSAWsViewOff(v);CHKERRQ(ierr); #if defined(PETSC_USE_LOG) 
PetscLogObjectState((PetscObject)v,"Length=%D",v->map->n); #endif if (vs->array_allocated) ierr = PetscFree(vs->array_allocated);CHKERRQ(ierr); ierr = PetscFree(vs);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP_Private" static PetscErrorCode VecResetArray_SeqCUSP_Private(Vec vin) { Vec_Seq *v = (Vec_Seq*)vin->data; PetscFunctionBegin; v->array = v->unplacedarray; v->unplacedarray = 0; PetscFunctionReturn(0); } /* these following 3 public versions are necessary because we use CUSP in the regular PETSc code and these need to be called from plain C code. */ #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck_Public" PetscErrorCode VecCUSPAllocateCheck_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU_Public" PetscErrorCode VecCUSPCopyToGPU_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome_Public" /* VecCUSPCopyToGPUSome_Public - Copies certain entries down to the GPU from the CPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyToGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome_Public" /* VecCUSPCopyFromGPUSome_Public - Copies certain entries up to the CPU from the GPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyFromGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC VECSEQCUSP - VECSEQCUSP = "seqcusp" - The basic sequential vector, modified to use CUSP Options Database Keys: . 
-vec_type seqcusp - sets the vector type to VECSEQCUSP during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq() M*/ /* for VecAYPX_SeqCUSP*/ namespace cusp { namespace blas { namespace detail { template <typename T> struct AYPX : public thrust::binary_function<T,T,T> { T alpha; AYPX(T _alpha) : alpha(_alpha) {} __host__ __device__ T operator()(T x, T y) { return alpha * y + x; } }; } template <typename ForwardIterator1, typename ForwardIterator2, typename ScalarType> void aypx(ForwardIterator1 first1,ForwardIterator1 last1,ForwardIterator2 first2,ScalarType alpha) { thrust::transform(first1,last1,first2,first2,detail::AYPX<ScalarType>(alpha)); } template <typename Array1, typename Array2, typename ScalarType> void aypx(const Array1& x, Array2& y, ScalarType alpha) { detail::assert_same_dimensions(x,y); aypx(x.begin(),x.end(),y.begin(),alpha); } } } #undef __FUNCT__ #define __FUNCT__ "VecAYPX_SeqCUSP" PetscErrorCode VecAYPX_SeqCUSP(Vec yin, PetscScalar alpha, Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { if (alpha != 0.0) { cusp::blas::aypx(*xarray,*yarray,alpha); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } else { cusp::blas::copy(*xarray,*yarray); } ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecAXPY_SeqCUSP" PetscErrorCode VecAXPY_SeqCUSP(Vec yin,PetscScalar alpha,Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpy(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPPointwiseDivide { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) / thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecPointwiseDivide_SeqCUSP" PetscErrorCode VecPointwiseDivide_SeqCUSP(Vec win, Vec xin, Vec yin) { CUSPARRAY *warray=NULL,*xarray=NULL,*yarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), xarray->begin(), yarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), xarray->end(), yarray->end())), VecCUSPPointwiseDivide()); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = 
VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); PetscFunctionReturn(0); } struct VecCUSPWAXPY { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t)*thrust::get<3>(t); } }; struct VecCUSPSum { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t); } }; struct VecCUSPDiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) - thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecWAXPY_SeqCUSP" PetscErrorCode VecWAXPY_SeqCUSP(Vec win,PetscScalar alpha,Vec xin, Vec yin) { CUSPARRAY *xarray=NULL,*yarray=NULL,*warray=NULL; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecCopy_SeqCUSP(yin,win);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPSum()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else if (alpha == -1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPDiff()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), thrust::make_constant_iterator(alpha), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), thrust::make_constant_iterator(alpha), xarray->end())), VecCUSPWAXPY()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* These functions are for the CUSP implementation of MAXPY with the loop unrolled on the CPU */ struct VecCUSPMAXPY4 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + 13*x3 +a4*x4 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t)+thrust::get<7>(t)*thrust::get<8>(t); } }; struct VecCUSPMAXPY3 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + a3*x3 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t); } }; struct VecCUSPMAXPY2 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2*/ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecMAXPY_SeqCUSP" PetscErrorCode VecMAXPY_SeqCUSP(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { 
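  /* Unrolled multi-axpy: the switch below first folds the nv%4 leftover vectors into x with the
     fused 3-, 2- and 1-vector updates, and the main loop then applies VecCUSPMAXPY4 to four y
     vectors at a time, so each group of four is accumulated with a single zipped
     thrust::for_each pass over x instead of four separate axpy calls. */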
PetscErrorCode ierr; CUSPARRAY *xarray,*yy0,*yy1,*yy2,*yy3; PetscInt n = xin->map->n,j,j_rem; PetscScalar alpha0,alpha1,alpha2,alpha3; PetscFunctionBegin; ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); switch (j_rem=nv&0x3) { case 3: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha += 3; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end())), VecCUSPMAXPY3()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); y += 3; break; case 2: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha +=2; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end())), VecCUSPMAXPY2()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } y +=2; break; case 1: alpha0 = *alpha++; ierr = VecAXPY_SeqCUSP(xin,alpha0,y[0]); y +=1; break; } for (j=j_rem; j<nv; j+=4) { alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha3 = alpha[3]; alpha += 4; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[3],&yy3);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin(), thrust::make_constant_iterator(alpha3), yy3->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end(), thrust::make_constant_iterator(alpha3), yy3->end())), VecCUSPMAXPY4()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[3],&yy3);CHKERRQ(ierr); y += 4; } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDot_SeqCUSP" PetscErrorCode VecDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; // 
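  /* For complex scalars PETSc defines VecDot(x,y) = y^H x, i.e. the second argument is the one
     that gets conjugated; that is presumably why the conjugated CUSP dot product below is
     invoked with yarray as its first argument. */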
PetscScalar *xptr,*yptr,*zgpu; //PetscReal tmp; PetscFunctionBegin; //VecNorm_SeqCUSP(xin, NORM_2, &tmp); //VecNorm_SeqCUSP(yin, NORM_2, &tmp); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) *z = cusp::blas::dotc(*yarray,*xarray); #else *z = cusp::blas::dot(*yarray,*xarray); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n >0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 // M = 2: __global__ void VecMDot_SeqCUSP_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUSP_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
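      /* i advances by blockDim.x, so consecutive threads read consecutive entries of x and of
         the y arrays and the global loads are coalesced */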
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUSP_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUSP_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #undef __FUNCT__ #define __FUNCT__ "VecMDot_SeqCUSP" PetscErrorCode VecMDot_SeqCUSP(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,j,n = xin->map->n,current_y_index = 0; CUSPARRAY *xarray,*y0array,*y1array,*y2array,*y3array,*y4array,*y5array,*y6array,*y7array; PetscScalar *group_results_gpu,*xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel hipError_t cuda_ierr; PetscFunctionBegin; // 
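  /* Two-stage reduction: each VecMDot_SeqCUSP_kernelN launch leaves MDOT_WORKGROUP_NUM partial
     sums per y vector in group_results_gpu (one per block, produced by the shared-memory
     reductions above); the partial sums are then copied back and accumulated into z[] on the
     host. Vectors are consumed in groups of 8, 4, 3, 2 or 1 per pass, and complex builds fall
     back to one CUSP dot product per vector. */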
allocate scratchpad memory for the results of individual work groups: if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUSP not positive."); cuda_ierr = hipMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not allocate CUDA work memory. Error code: %d", (int)cuda_ierr); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); xptr = thrust::raw_pointer_cast(xarray->data()); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel4), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<4; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel3), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. 
Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<3; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel2), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<2; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dotc(*y0array, *xarray); #else z[current_y_index] = cusp::blas::dot(*xarray, *y0array); #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); z[current_y_index+4] = cusp::blas::dot(*y4array,*xarray); z[current_y_index+5] = cusp::blas::dot(*y5array,*xarray); z[current_y_index+6] = cusp::blas::dot(*y6array,*xarray); z[current_y_index+7] = cusp::blas::dot(*y7array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); y4ptr = thrust::raw_pointer_cast(y4array->data()); y5ptr = 
thrust::raw_pointer_cast(y5array->data()); y6ptr = thrust::raw_pointer_cast(y6array->data()); y7ptr = thrust::raw_pointer_cast(y7array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel8), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<8; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); current_y_index += 8; break; } } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); cuda_ierr = hipFree(group_results_gpu); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host: %d", (int)cuda_ierr); ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM #undef __FUNCT__ #define __FUNCT__ "VecSet_SeqCUSP" PetscErrorCode VecSet_SeqCUSP(Vec xin,PetscScalar alpha) { CUSPARRAY *xarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; /* if there's a faster way to do the case alpha=0.0 on the GPU we should do that*/ ierr = VecCUSPGetArrayWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::fill(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayWrite(xin,&xarray); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScale_SeqCUSP" PetscErrorCode VecScale_SeqCUSP(Vec xin, PetscScalar alpha) { CUSPARRAY *xarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecSet_SeqCUSP(xin,alpha);CHKERRQ(ierr); } else if (alpha != 1.0) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::scal(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecTDot_SeqCUSP" PetscErrorCode VecTDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; //#if defined(PETSC_USE_COMPLEX) /*Not working for complex*/ //#else ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { *z = cusp::blas::dot(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } //#endif ierr = 
WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n > 0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP" PetscErrorCode VecCopy_SeqCUSP(Vec xin,Vec yin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (xin != yin) { if (xin->valid_GPU_array == PETSC_CUSP_GPU) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU if we are on the CPU*/ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_GPU) { /* copy in GPU */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSwap_SeqCUSP" PetscErrorCode VecSwap_SeqCUSP(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) hipblasCswap(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuFloatComplex*)VecCUSPCastToRawPtr(*yarray),one); #else hipblasZswap(bn,(hipDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one,(hipDoubleComplex*)VecCUSPCastToRawPtr(*yarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) hipblasSswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #else hipblasDswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #endif #endif ierr = hipblasGetError();CHKERRCUSP(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = 
VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPAX { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBY_SeqCUSP" PetscErrorCode VecAXPBY_SeqCUSP(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; if (a == 0.0) { ierr = VecScale_SeqCUSP(yin,beta);CHKERRQ(ierr); } else if (b == 1.0) { ierr = VecAXPY_SeqCUSP(yin,alpha,xin);CHKERRQ(ierr); } else if (a == 1.0) { ierr = VecAYPX_SeqCUSP(yin,beta,xin);CHKERRQ(ierr); } else if (b == 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( yarray->begin(), thrust::make_constant_iterator(a), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( yarray->end(), thrust::make_constant_iterator(a), xarray->end())), VecCUSPAX()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpby(*xarray,*yarray,*yarray,a,b); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* structs below are for special cases of VecAXPBYPCZ_SeqCUSP */ struct VecCUSPXPBYPCZ { /* z = x + b*y + c*z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<0>(t)+thrust::get<2>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; struct VecCUSPAXPBYPZ { /* z = ax + b*y + z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) += thrust::get<2>(t)*thrust::get<1>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBYPCZ_SeqCUSP" PetscErrorCode VecAXPBYPCZ_SeqCUSP(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; CUSPARRAY *xarray,*yarray,*zarray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(zin,&zarray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), thrust::make_constant_iterator(gamma), xarray->begin(), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), thrust::make_constant_iterator(gamma), xarray->end(), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPXPBYPCZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else if (gamma == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( 
thrust::make_tuple( zarray->begin(), xarray->begin(), thrust::make_constant_iterator(alpha), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), xarray->end(), thrust::make_constant_iterator(alpha), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPAXPBYPZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else { try { cusp::blas::axpbypcz(*xarray,*yarray,*zarray,*zarray,alpha,beta,gamma); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(zin,&zarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPointwiseMult_SeqCUSP" PetscErrorCode VecPointwiseMult_SeqCUSP(Vec win,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = win->map->n; CUSPARRAY *xarray,*yarray,*warray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(win,&warray);CHKERRQ(ierr); try { cusp::blas::xmy(*xarray,*yarray,*warray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogFlops(n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cusp */ #undef __FUNCT__ #define __FUNCT__ "VecNorm_SeqCUSP" PetscErrorCode VecNorm_SeqCUSP(Vec xin,NormType type,PetscReal *z) { const PetscScalar *xx; PetscErrorCode ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn; CUSPARRAY *xarray; PetscFunctionBegin; ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); try { *z = cusp::blas::nrm2(*xarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { PetscInt i; PetscReal max = 0.0,tmp; ierr = VecGetArrayRead(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) { if ((tmp = PetscAbsScalar(*xx)) > max) max = tmp; /* check special case of tmp == NaN */ if (tmp != tmp) {max = tmp; break;} xx++; } ierr = VecRestoreArrayRead(xin,&xx);CHKERRQ(ierr); *z = max; } else if (type == NORM_1) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) *z = hipblasScasum(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one); #else *z = hipblasDzasum(bn,(hipDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) *z = hipblasSasum(bn,VecCUSPCastToRawPtr(*xarray),one); #else *z = hipblasDasum(bn,VecCUSPCastToRawPtr(*xarray),one); #endif #endif ierr = hipblasGetError();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = 
VecNorm_SeqCUSP(xin,NORM_1,z);CHKERRQ(ierr); ierr = VecNorm_SeqCUSP(xin,NORM_2,z+1);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*the following few functions should be modified to actually work with the GPU so they don't force unneccesary allocation of CPU memory */ #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP" PetscErrorCode VecSetRandom_SeqCUSP(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecSetRandom_SeqCUSP_Private(xin,r);CHKERRQ(ierr); xin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP" PetscErrorCode VecResetArray_SeqCUSP(Vec vin) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecResetArray_SeqCUSP_Private(vin);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPlaceArray_SeqCUSP" PetscErrorCode VecPlaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecPlaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecReplaceArray_SeqCUSP" PetscErrorCode VecReplaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecReplaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreateSeqCUSP" /*@ VecCreateSeqCUSP - Creates a standard, sequential array-style vector. Collective on MPI_Comm Input Parameter: + comm - the communicator, should be PETSC_COMM_SELF - n - the vector length Output Parameter: . V - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. Level: intermediate Concepts: vectors^creating sequential .seealso: VecCreateMPI(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost() @*/ PetscErrorCode VecCreateSeqCUSP(MPI_Comm comm,PetscInt n,Vec *v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreate(comm,v);CHKERRQ(ierr); ierr = VecSetSizes(*v,n,n);CHKERRQ(ierr); ierr = VecSetType(*v,VECSEQCUSP);CHKERRQ(ierr); PetscFunctionReturn(0); } /*The following template functions are for VecDotNorm2_SeqCUSP. 
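 In the real case a single thrust::transform_reduce maps each pair (s_i,t_i) to the tuple
 (s_i*t_i, t_i*t_i) and adds the tuples component-wise, so the dot product and the squared norm
 of t come out of one sweep over the data.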
Note that there is no complex support as currently written*/ template <typename T> struct cuspdotnormcalculate : thrust::unary_function<T,T> { __host__ __device__ T operator()(T x) { #if defined(PETSC_USE_COMPLEX) //return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #else return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #endif } }; template <typename T> struct cuspdotnormreduce : thrust::binary_function<T,T,T> { __host__ __device__ T operator()(T x,T y) { return thrust::make_tuple(thrust::get<0>(x)+thrust::get<0>(y), thrust::get<1>(x)+thrust::get<1>(y)); } }; #undef __FUNCT__ #define __FUNCT__ "VecDotNorm2_SeqCUSP" PetscErrorCode VecDotNorm2_SeqCUSP(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscScalar zero = 0.0; PetscReal n=s->map->n; thrust::tuple<PetscScalar,PetscScalar> result; CUSPARRAY *sarray,*tarray; PetscFunctionBegin; /*ierr = VecCUSPCopyToGPU(s);CHKERRQ(ierr); ierr = VecCUSPCopyToGPU(t);CHKERRQ(ierr);*/ ierr = VecCUSPGetArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(t,&tarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) ierr = VecDot_SeqCUSP(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUSP(t,t,nm);CHKERRQ(ierr); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*dp),PetscImaginaryPart(*dp)); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*nm),PetscImaginaryPart(*nm)); #else result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( sarray->begin(), tarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( sarray->end(), tarray->end())), cuspdotnormcalculate<thrust::tuple<PetscScalar,PetscScalar> >(), thrust::make_tuple(zero,zero), /*init */ cuspdotnormreduce<thrust::tuple<PetscScalar, PetscScalar> >()); /* binary function */ *dp = thrust::get<0>(result); *nm = thrust::get<1>(result); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(t,&tarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDuplicate_SeqCUSP" PetscErrorCode VecDuplicate_SeqCUSP(Vec win,Vec *V) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateSeqCUSP(PetscObjectComm((PetscObject)win),win->map->n,V);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*V)->map);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*V))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*V))->qlist);CHKERRQ(ierr); (*V)->stash.ignorenegidx = win->stash.ignorenegidx; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP" PetscErrorCode VecDestroy_SeqCUSP(Vec v) { PetscErrorCode ierr; Vec_Seq *s = (Vec_Seq*)v->data; hipError_t err; PetscFunctionBegin; try { if (v->spptr) { delete ((Vec_CUSP*)v->spptr)->GPUarray; err = hipStreamDestroy(((Vec_CUSP*)v->spptr)->stream);CHKERRCUSP(err); /* If the host array has been registered as (page-locked) mapped, one must unregister the buffer */ if (((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked) { err = hipHostUnregister(s->array);CHKERRCUSP(err); } delete (Vec_CUSP*) v->spptr; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecDestroy_SeqCUSP_Private(v);CHKERRQ(ierr); PetscFunctionReturn(0); 
} #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return cusp::conj(x); } }; #endif #undef __FUNCT__ #define __FUNCT__ "VecConjugate_SeqCUSP" PetscErrorCode VecConjugate_SeqCUSP(Vec xin) { PetscErrorCode ierr; CUSPARRAY *xarray; PetscFunctionBegin; ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) thrust::transform(xarray->begin(), xarray->end(), xarray->begin(), conjugate()); #endif ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreate_SeqCUSP" PETSC_EXTERN PetscErrorCode VecCreate_SeqCUSP(Vec V) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)V),&size);CHKERRQ(ierr); if (size > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot create VECSEQCUSP on more than one process"); ierr = VecCreate_Seq_Private(V,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)V,VECSEQCUSP);CHKERRQ(ierr); V->ops->dot = VecDot_SeqCUSP; V->ops->norm = VecNorm_SeqCUSP; V->ops->tdot = VecTDot_SeqCUSP; V->ops->scale = VecScale_SeqCUSP; V->ops->copy = VecCopy_SeqCUSP; V->ops->set = VecSet_SeqCUSP; V->ops->swap = VecSwap_SeqCUSP; V->ops->axpy = VecAXPY_SeqCUSP; V->ops->axpby = VecAXPBY_SeqCUSP; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUSP; V->ops->pointwisemult = VecPointwiseMult_SeqCUSP; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUSP; V->ops->setrandom = VecSetRandom_SeqCUSP; V->ops->dot_local = VecDot_SeqCUSP; V->ops->tdot_local = VecTDot_SeqCUSP; V->ops->norm_local = VecNorm_SeqCUSP; V->ops->mdot_local = VecMDot_SeqCUSP; V->ops->maxpy = VecMAXPY_SeqCUSP; V->ops->mdot = VecMDot_SeqCUSP; V->ops->aypx = VecAYPX_SeqCUSP; V->ops->waxpy = VecWAXPY_SeqCUSP; V->ops->dotnorm2 = VecDotNorm2_SeqCUSP; V->ops->placearray = VecPlaceArray_SeqCUSP; V->ops->replacearray = VecReplaceArray_SeqCUSP; V->ops->resetarray = VecResetArray_SeqCUSP; V->ops->destroy = VecDestroy_SeqCUSP; V->ops->duplicate = VecDuplicate_SeqCUSP; V->ops->conjugate = VecConjugate_SeqCUSP; ierr = VecCUSPAllocateCheck(V);CHKERRQ(ierr); V->valid_GPU_array = PETSC_CUSP_GPU; ierr = VecSet(V,0.0);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayRead(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayRead(Vec v, CUSPARRAY **a) { PetscFunctionBegin; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = 
VecCUSPAllocateCheck(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); }
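
/*
   Minimal usage sketch for the VECSEQCUSP type implemented above, assuming a PETSc build
   configured with CUSP support; it is kept out of compilation and only illustrates how the
   registered operations (VecSet, VecAXPY, VecDot) are reached through the ordinary Vec API.
*/
#if 0
#include <petscvec.h>

int main(int argc,char **argv)
{
  Vec            x,y;
  PetscScalar    dot;
  PetscInt       n = 16;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
  ierr = VecCreateSeqCUSP(PETSC_COMM_SELF,n,&x);CHKERRQ(ierr);  /* storage lives on the GPU */
  ierr = VecDuplicate(x,&y);CHKERRQ(ierr);
  ierr = VecSet(x,1.0);CHKERRQ(ierr);                           /* dispatches to VecSet_SeqCUSP */
  ierr = VecSet(y,2.0);CHKERRQ(ierr);
  ierr = VecAXPY(y,3.0,x);CHKERRQ(ierr);                        /* y <- y + 3 x via VecAXPY_SeqCUSP */
  ierr = VecDot(x,y,&dot);CHKERRQ(ierr);                        /* VecDot_SeqCUSP */
  ierr = PetscPrintf(PETSC_COMM_SELF,"dot = %g\n",(double)PetscRealPart(dot));CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}
#endif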
a812b596f5be0436e204c545613b7a01e0ad4502.cu
/* Implements the sequential cusp vectors. */ #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <cuda_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheckHost" /* Allocates space for the vector array on the Host if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheckHost(Vec v) { PetscErrorCode ierr; cudaError_t err; PetscScalar *array; Vec_Seq *s; PetscInt n = v->map->n; PetscFunctionBegin; s = (Vec_Seq*)v->data; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (s->array == 0) { ierr = PetscMalloc1(n,&array);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));CHKERRQ(ierr); s->array = array; s->array_allocated = array; err = cudaHostRegister(s->array, n*sizeof(PetscScalar),cudaHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck" /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheck(Vec v) { cudaError_t err; cudaStream_t stream; Vec_Seq *s = (Vec_Seq*)v->data; PetscFunctionBegin; // First allocate memory on the GPU if needed if (!v->spptr) { try { v->spptr = new Vec_CUSP; ((Vec_CUSP*)v->spptr)->GPUarray = new CUSPARRAY; ((Vec_CUSP*)v->spptr)->GPUarray->resize((PetscBLASInt)v->map->n); err = cudaStreamCreate(&stream);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->stream = stream; ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_FALSE; /* If the array is already allocated, one can register it as (page-locked) mapped. 
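     (The hostDataRegisteredAsPageLocked flag set below records the registration so that
     VecDestroy_SeqCUSP() can later release the buffer with cudaHostUnregister().)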
This can substantially accelerate data transfer across the PCI Express */ if (s->array) { err = cudaHostRegister(s->array, v->map->n*sizeof(PetscScalar),cudaHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } v->ops->destroy = VecDestroy_SeqCUSP; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU" /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUSPCopyToGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; cudaStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = cudaMemcpyAsync(varray->data().get(), *(PetscScalar**)v->data, v->map->n*sizeof(PetscScalar), cudaMemcpyHostToDevice, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome" static PetscErrorCode VecCUSPCopyToGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; cudaError_t err; PetscScalar *cpuPtr, *gpuPtr; cudaStream_t stream; Vec_Seq *s; VecScatterCUSPIndices_PtoP ptop_scatter = (VecScatterCUSPIndices_PtoP)ci->scatter; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { stream=((Vec_CUSP*)v->spptr)->stream; s = (Vec_Seq*)v->data; ierr = PetscLogEventBegin(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); varray = ((Vec_CUSP*)v->spptr)->GPUarray; gpuPtr = varray->data().get() + ptop_scatter->recvLowestIndex; cpuPtr = s->array + ptop_scatter->recvLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = cudaMemcpyAsync(gpuPtr, cpuPtr, ptop_scatter->nr*sizeof(PetscScalar), cudaMemcpyHostToDevice, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->recvIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->recvIndicesGPU; thrust::copy(thrust::make_permutation_iterator(s->array,indicesCPU->begin()), thrust::make_permutation_iterator(s->array,indicesCPU->end()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin())); #endif // Set the buffer states v->valid_GPU_array = PETSC_CUSP_BOTH; ierr = PetscLogEventEnd(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPU" /* VecCUSPCopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUSPCopyFromGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; cudaStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = cudaMemcpyAsync(*(PetscScalar**)v->data, 
varray->data().get(), v->map->n*sizeof(PetscScalar), cudaMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome" /* Note that this function only copies *some* of the values up from the GPU to CPU, which means that we need recombine the data at some point before using any of the standard functions. We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray where you have to always call in pairs */ PetscErrorCode VecCUSPCopyFromGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; cudaError_t err; PetscScalar *cpuPtr, *gpuPtr; cudaStream_t stream; Vec_Seq *s; VecScatterCUSPIndices_PtoP ptop_scatter = (VecScatterCUSPIndices_PtoP)ci->scatter; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); stream=((Vec_CUSP*)v->spptr)->stream; varray=((Vec_CUSP*)v->spptr)->GPUarray; s = (Vec_Seq*)v->data; gpuPtr = varray->data().get() + ptop_scatter->sendLowestIndex; cpuPtr = s->array + ptop_scatter->sendLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = cudaMemcpyAsync(cpuPtr, gpuPtr, ptop_scatter->ns*sizeof(PetscScalar), cudaMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->sendIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->sendIndicesGPU; thrust::copy(thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->end()), thrust::make_permutation_iterator(s->array,indicesCPU->begin())); #endif ierr = VecCUSPRestoreArrayRead(v,&varray);CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP_Private" static PetscErrorCode VecCopy_SeqCUSP_Private(Vec xin,Vec yin) { PetscScalar *ya; const PetscScalar *xa; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(xin); ierr = VecCUSPAllocateCheckHost(yin); if (xin != yin) { ierr = VecGetArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecGetArray(yin,&ya);CHKERRQ(ierr); ierr = PetscMemcpy(ya,xa,xin->map->n*sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecRestoreArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecRestoreArray(yin,&ya);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP_Private" static PetscErrorCode VecSetRandom_SeqCUSP_Private(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscInt n = xin->map->n,i; PetscScalar *xx; PetscFunctionBegin; ierr = VecGetArray(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) {ierr = PetscRandomGetValue(r,&xx[i]);CHKERRQ(ierr);} ierr = VecRestoreArray(xin,&xx);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP_Private" static PetscErrorCode VecDestroy_SeqCUSP_Private(Vec v) { Vec_Seq *vs = (Vec_Seq*)v->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectSAWsViewOff(v);CHKERRQ(ierr); #if 
defined(PETSC_USE_LOG) PetscLogObjectState((PetscObject)v,"Length=%D",v->map->n); #endif if (vs->array_allocated) ierr = PetscFree(vs->array_allocated);CHKERRQ(ierr); ierr = PetscFree(vs);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP_Private" static PetscErrorCode VecResetArray_SeqCUSP_Private(Vec vin) { Vec_Seq *v = (Vec_Seq*)vin->data; PetscFunctionBegin; v->array = v->unplacedarray; v->unplacedarray = 0; PetscFunctionReturn(0); } /* these following 3 public versions are necessary because we use CUSP in the regular PETSc code and these need to be called from plain C code. */ #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck_Public" PetscErrorCode VecCUSPAllocateCheck_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU_Public" PetscErrorCode VecCUSPCopyToGPU_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome_Public" /* VecCUSPCopyToGPUSome_Public - Copies certain entries down to the GPU from the CPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyToGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome_Public" /* VecCUSPCopyFromGPUSome_Public - Copies certain entries up to the CPU from the GPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyFromGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC VECSEQCUSP - VECSEQCUSP = "seqcusp" - The basic sequential vector, modified to use CUSP Options Database Keys: . 
-vec_type seqcusp - sets the vector type to VECSEQCUSP during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq() M*/ /* for VecAYPX_SeqCUSP*/ namespace cusp { namespace blas { namespace detail { template <typename T> struct AYPX : public thrust::binary_function<T,T,T> { T alpha; AYPX(T _alpha) : alpha(_alpha) {} __host__ __device__ T operator()(T x, T y) { return alpha * y + x; } }; } template <typename ForwardIterator1, typename ForwardIterator2, typename ScalarType> void aypx(ForwardIterator1 first1,ForwardIterator1 last1,ForwardIterator2 first2,ScalarType alpha) { thrust::transform(first1,last1,first2,first2,detail::AYPX<ScalarType>(alpha)); } template <typename Array1, typename Array2, typename ScalarType> void aypx(const Array1& x, Array2& y, ScalarType alpha) { detail::assert_same_dimensions(x,y); aypx(x.begin(),x.end(),y.begin(),alpha); } } } #undef __FUNCT__ #define __FUNCT__ "VecAYPX_SeqCUSP" PetscErrorCode VecAYPX_SeqCUSP(Vec yin, PetscScalar alpha, Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { if (alpha != 0.0) { cusp::blas::aypx(*xarray,*yarray,alpha); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } else { cusp::blas::copy(*xarray,*yarray); } ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecAXPY_SeqCUSP" PetscErrorCode VecAXPY_SeqCUSP(Vec yin,PetscScalar alpha,Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpy(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPPointwiseDivide { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) / thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecPointwiseDivide_SeqCUSP" PetscErrorCode VecPointwiseDivide_SeqCUSP(Vec win, Vec xin, Vec yin) { CUSPARRAY *warray=NULL,*xarray=NULL,*yarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), xarray->begin(), yarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), xarray->end(), yarray->end())), VecCUSPPointwiseDivide()); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = 
VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); PetscFunctionReturn(0); } struct VecCUSPWAXPY { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t)*thrust::get<3>(t); } }; struct VecCUSPSum { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t); } }; struct VecCUSPDiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) - thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecWAXPY_SeqCUSP" PetscErrorCode VecWAXPY_SeqCUSP(Vec win,PetscScalar alpha,Vec xin, Vec yin) { CUSPARRAY *xarray=NULL,*yarray=NULL,*warray=NULL; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecCopy_SeqCUSP(yin,win);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPSum()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else if (alpha == -1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPDiff()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), thrust::make_constant_iterator(alpha), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), thrust::make_constant_iterator(alpha), xarray->end())), VecCUSPWAXPY()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* These functions are for the CUSP implementation of MAXPY with the loop unrolled on the CPU */ struct VecCUSPMAXPY4 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + 13*x3 +a4*x4 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t)+thrust::get<7>(t)*thrust::get<8>(t); } }; struct VecCUSPMAXPY3 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + a3*x3 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t); } }; struct VecCUSPMAXPY2 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2*/ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecMAXPY_SeqCUSP" PetscErrorCode VecMAXPY_SeqCUSP(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { 
PetscErrorCode ierr; CUSPARRAY *xarray,*yy0,*yy1,*yy2,*yy3; PetscInt n = xin->map->n,j,j_rem; PetscScalar alpha0,alpha1,alpha2,alpha3; PetscFunctionBegin; ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); switch (j_rem=nv&0x3) { case 3: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha += 3; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end())), VecCUSPMAXPY3()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); y += 3; break; case 2: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha +=2; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end())), VecCUSPMAXPY2()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } y +=2; break; case 1: alpha0 = *alpha++; ierr = VecAXPY_SeqCUSP(xin,alpha0,y[0]); y +=1; break; } for (j=j_rem; j<nv; j+=4) { alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha3 = alpha[3]; alpha += 4; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[3],&yy3);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin(), thrust::make_constant_iterator(alpha3), yy3->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end(), thrust::make_constant_iterator(alpha3), yy3->end())), VecCUSPMAXPY4()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[3],&yy3);CHKERRQ(ierr); y += 4; } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDot_SeqCUSP" PetscErrorCode VecDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; // 
PetscScalar *xptr,*yptr,*zgpu; //PetscReal tmp; PetscFunctionBegin; //VecNorm_SeqCUSP(xin, NORM_2, &tmp); //VecNorm_SeqCUSP(yin, NORM_2, &tmp); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) *z = cusp::blas::dotc(*yarray,*xarray); #else *z = cusp::blas::dot(*yarray,*xarray); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n >0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 // M = 2: __global__ void VecMDot_SeqCUSP_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUSP_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUSP_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUSP_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #undef __FUNCT__ #define __FUNCT__ "VecMDot_SeqCUSP" PetscErrorCode VecMDot_SeqCUSP(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,j,n = xin->map->n,current_y_index = 0; CUSPARRAY *xarray,*y0array,*y1array,*y2array,*y3array,*y4array,*y5array,*y6array,*y7array; PetscScalar *group_results_gpu,*xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel cudaError_t cuda_ierr; PetscFunctionBegin; // 
allocate scratchpad memory for the results of individual work groups: if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUSP not positive."); cuda_ierr = cudaMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not allocate CUDA work memory. Error code: %d", (int)cuda_ierr); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); xptr = thrust::raw_pointer_cast(xarray->data()); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); // run kernel: VecMDot_SeqCUSP_kernel4<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<4; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); // run kernel: VecMDot_SeqCUSP_kernel3<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. 
Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<3; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); // run kernel: VecMDot_SeqCUSP_kernel2<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<2; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dotc(*y0array, *xarray); #else z[current_y_index] = cusp::blas::dot(*xarray, *y0array); #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); z[current_y_index+4] = cusp::blas::dot(*y4array,*xarray); z[current_y_index+5] = cusp::blas::dot(*y5array,*xarray); z[current_y_index+6] = cusp::blas::dot(*y6array,*xarray); z[current_y_index+7] = cusp::blas::dot(*y7array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); y4ptr = thrust::raw_pointer_cast(y4array->data()); y5ptr = thrust::raw_pointer_cast(y5array->data()); 
y6ptr = thrust::raw_pointer_cast(y6array->data()); y7ptr = thrust::raw_pointer_cast(y7array->data()); // run kernel: VecMDot_SeqCUSP_kernel8<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<8; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); current_y_index += 8; break; } } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); cuda_ierr = cudaFree(group_results_gpu); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host: %d", (int)cuda_ierr); ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM #undef __FUNCT__ #define __FUNCT__ "VecSet_SeqCUSP" PetscErrorCode VecSet_SeqCUSP(Vec xin,PetscScalar alpha) { CUSPARRAY *xarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; /* if there's a faster way to do the case alpha=0.0 on the GPU we should do that*/ ierr = VecCUSPGetArrayWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::fill(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayWrite(xin,&xarray); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScale_SeqCUSP" PetscErrorCode VecScale_SeqCUSP(Vec xin, PetscScalar alpha) { CUSPARRAY *xarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecSet_SeqCUSP(xin,alpha);CHKERRQ(ierr); } else if (alpha != 1.0) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::scal(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecTDot_SeqCUSP" PetscErrorCode VecTDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; //#if defined(PETSC_USE_COMPLEX) /*Not working for complex*/ //#else ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { *z = cusp::blas::dot(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } //#endif ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n > 0) { ierr = 
PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP" PetscErrorCode VecCopy_SeqCUSP(Vec xin,Vec yin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (xin != yin) { if (xin->valid_GPU_array == PETSC_CUSP_GPU) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU if we are on the CPU*/ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_GPU) { /* copy in GPU */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSwap_SeqCUSP" PetscErrorCode VecSwap_SeqCUSP(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) cublasCswap(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuFloatComplex*)VecCUSPCastToRawPtr(*yarray),one); #else cublasZswap(bn,(cuDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuDoubleComplex*)VecCUSPCastToRawPtr(*yarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) cublasSswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #else cublasDswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #endif #endif ierr = cublasGetError();CHKERRCUSP(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = 
VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPAX { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBY_SeqCUSP" PetscErrorCode VecAXPBY_SeqCUSP(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; if (a == 0.0) { ierr = VecScale_SeqCUSP(yin,beta);CHKERRQ(ierr); } else if (b == 1.0) { ierr = VecAXPY_SeqCUSP(yin,alpha,xin);CHKERRQ(ierr); } else if (a == 1.0) { ierr = VecAYPX_SeqCUSP(yin,beta,xin);CHKERRQ(ierr); } else if (b == 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( yarray->begin(), thrust::make_constant_iterator(a), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( yarray->end(), thrust::make_constant_iterator(a), xarray->end())), VecCUSPAX()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpby(*xarray,*yarray,*yarray,a,b); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* structs below are for special cases of VecAXPBYPCZ_SeqCUSP */ struct VecCUSPXPBYPCZ { /* z = x + b*y + c*z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<0>(t)+thrust::get<2>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; struct VecCUSPAXPBYPZ { /* z = ax + b*y + z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) += thrust::get<2>(t)*thrust::get<1>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBYPCZ_SeqCUSP" PetscErrorCode VecAXPBYPCZ_SeqCUSP(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; CUSPARRAY *xarray,*yarray,*zarray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(zin,&zarray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), thrust::make_constant_iterator(gamma), xarray->begin(), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), thrust::make_constant_iterator(gamma), xarray->end(), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPXPBYPCZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else if (gamma == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), xarray->begin(), 
thrust::make_constant_iterator(alpha), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), xarray->end(), thrust::make_constant_iterator(alpha), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPAXPBYPZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else { try { cusp::blas::axpbypcz(*xarray,*yarray,*zarray,*zarray,alpha,beta,gamma); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(zin,&zarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPointwiseMult_SeqCUSP" PetscErrorCode VecPointwiseMult_SeqCUSP(Vec win,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = win->map->n; CUSPARRAY *xarray,*yarray,*warray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(win,&warray);CHKERRQ(ierr); try { cusp::blas::xmy(*xarray,*yarray,*warray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogFlops(n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cusp */ #undef __FUNCT__ #define __FUNCT__ "VecNorm_SeqCUSP" PetscErrorCode VecNorm_SeqCUSP(Vec xin,NormType type,PetscReal *z) { const PetscScalar *xx; PetscErrorCode ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn; CUSPARRAY *xarray; PetscFunctionBegin; ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); try { *z = cusp::blas::nrm2(*xarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { PetscInt i; PetscReal max = 0.0,tmp; ierr = VecGetArrayRead(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) { if ((tmp = PetscAbsScalar(*xx)) > max) max = tmp; /* check special case of tmp == NaN */ if (tmp != tmp) {max = tmp; break;} xx++; } ierr = VecRestoreArrayRead(xin,&xx);CHKERRQ(ierr); *z = max; } else if (type == NORM_1) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) *z = cublasScasum(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one); #else *z = cublasDzasum(bn,(cuDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) *z = cublasSasum(bn,VecCUSPCastToRawPtr(*xarray),one); #else *z = cublasDasum(bn,VecCUSPCastToRawPtr(*xarray),one); #endif #endif ierr = cublasGetError();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = VecNorm_SeqCUSP(xin,NORM_1,z);CHKERRQ(ierr); ierr = 
VecNorm_SeqCUSP(xin,NORM_2,z+1);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*the following few functions should be modified to actually work with the GPU so they don't force unneccesary allocation of CPU memory */ #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP" PetscErrorCode VecSetRandom_SeqCUSP(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecSetRandom_SeqCUSP_Private(xin,r);CHKERRQ(ierr); xin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP" PetscErrorCode VecResetArray_SeqCUSP(Vec vin) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecResetArray_SeqCUSP_Private(vin);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPlaceArray_SeqCUSP" PetscErrorCode VecPlaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecPlaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecReplaceArray_SeqCUSP" PetscErrorCode VecReplaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecReplaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreateSeqCUSP" /*@ VecCreateSeqCUSP - Creates a standard, sequential array-style vector. Collective on MPI_Comm Input Parameter: + comm - the communicator, should be PETSC_COMM_SELF - n - the vector length Output Parameter: . V - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. Level: intermediate Concepts: vectors^creating sequential .seealso: VecCreateMPI(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost() @*/ PetscErrorCode VecCreateSeqCUSP(MPI_Comm comm,PetscInt n,Vec *v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreate(comm,v);CHKERRQ(ierr); ierr = VecSetSizes(*v,n,n);CHKERRQ(ierr); ierr = VecSetType(*v,VECSEQCUSP);CHKERRQ(ierr); PetscFunctionReturn(0); } /*The following template functions are for VecDotNorm2_SeqCUSP. 
Note that there is no complex support as currently written*/ template <typename T> struct cuspdotnormcalculate : thrust::unary_function<T,T> { __host__ __device__ T operator()(T x) { #if defined(PETSC_USE_COMPLEX) //return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #else return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #endif } }; template <typename T> struct cuspdotnormreduce : thrust::binary_function<T,T,T> { __host__ __device__ T operator()(T x,T y) { return thrust::make_tuple(thrust::get<0>(x)+thrust::get<0>(y), thrust::get<1>(x)+thrust::get<1>(y)); } }; #undef __FUNCT__ #define __FUNCT__ "VecDotNorm2_SeqCUSP" PetscErrorCode VecDotNorm2_SeqCUSP(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscScalar zero = 0.0; PetscReal n=s->map->n; thrust::tuple<PetscScalar,PetscScalar> result; CUSPARRAY *sarray,*tarray; PetscFunctionBegin; /*ierr = VecCUSPCopyToGPU(s);CHKERRQ(ierr); ierr = VecCUSPCopyToGPU(t);CHKERRQ(ierr);*/ ierr = VecCUSPGetArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(t,&tarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) ierr = VecDot_SeqCUSP(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUSP(t,t,nm);CHKERRQ(ierr); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*dp),PetscImaginaryPart(*dp)); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*nm),PetscImaginaryPart(*nm)); #else result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( sarray->begin(), tarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( sarray->end(), tarray->end())), cuspdotnormcalculate<thrust::tuple<PetscScalar,PetscScalar> >(), thrust::make_tuple(zero,zero), /*init */ cuspdotnormreduce<thrust::tuple<PetscScalar, PetscScalar> >()); /* binary function */ *dp = thrust::get<0>(result); *nm = thrust::get<1>(result); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(t,&tarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDuplicate_SeqCUSP" PetscErrorCode VecDuplicate_SeqCUSP(Vec win,Vec *V) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateSeqCUSP(PetscObjectComm((PetscObject)win),win->map->n,V);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*V)->map);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*V))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*V))->qlist);CHKERRQ(ierr); (*V)->stash.ignorenegidx = win->stash.ignorenegidx; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP" PetscErrorCode VecDestroy_SeqCUSP(Vec v) { PetscErrorCode ierr; Vec_Seq *s = (Vec_Seq*)v->data; cudaError_t err; PetscFunctionBegin; try { if (v->spptr) { delete ((Vec_CUSP*)v->spptr)->GPUarray; err = cudaStreamDestroy(((Vec_CUSP*)v->spptr)->stream);CHKERRCUSP(err); /* If the host array has been registered as (page-locked) mapped, one must unregister the buffer */ if (((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked) { err = cudaHostUnregister(s->array);CHKERRCUSP(err); } delete (Vec_CUSP*) v->spptr; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecDestroy_SeqCUSP_Private(v);CHKERRQ(ierr); 
PetscFunctionReturn(0); } #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return cusp::conj(x); } }; #endif #undef __FUNCT__ #define __FUNCT__ "VecConjugate_SeqCUSP" PetscErrorCode VecConjugate_SeqCUSP(Vec xin) { PetscErrorCode ierr; CUSPARRAY *xarray; PetscFunctionBegin; ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) thrust::transform(xarray->begin(), xarray->end(), xarray->begin(), conjugate()); #endif ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreate_SeqCUSP" PETSC_EXTERN PetscErrorCode VecCreate_SeqCUSP(Vec V) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)V),&size);CHKERRQ(ierr); if (size > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot create VECSEQCUSP on more than one process"); ierr = VecCreate_Seq_Private(V,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)V,VECSEQCUSP);CHKERRQ(ierr); V->ops->dot = VecDot_SeqCUSP; V->ops->norm = VecNorm_SeqCUSP; V->ops->tdot = VecTDot_SeqCUSP; V->ops->scale = VecScale_SeqCUSP; V->ops->copy = VecCopy_SeqCUSP; V->ops->set = VecSet_SeqCUSP; V->ops->swap = VecSwap_SeqCUSP; V->ops->axpy = VecAXPY_SeqCUSP; V->ops->axpby = VecAXPBY_SeqCUSP; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUSP; V->ops->pointwisemult = VecPointwiseMult_SeqCUSP; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUSP; V->ops->setrandom = VecSetRandom_SeqCUSP; V->ops->dot_local = VecDot_SeqCUSP; V->ops->tdot_local = VecTDot_SeqCUSP; V->ops->norm_local = VecNorm_SeqCUSP; V->ops->mdot_local = VecMDot_SeqCUSP; V->ops->maxpy = VecMAXPY_SeqCUSP; V->ops->mdot = VecMDot_SeqCUSP; V->ops->aypx = VecAYPX_SeqCUSP; V->ops->waxpy = VecWAXPY_SeqCUSP; V->ops->dotnorm2 = VecDotNorm2_SeqCUSP; V->ops->placearray = VecPlaceArray_SeqCUSP; V->ops->replacearray = VecReplaceArray_SeqCUSP; V->ops->resetarray = VecResetArray_SeqCUSP; V->ops->destroy = VecDestroy_SeqCUSP; V->ops->duplicate = VecDuplicate_SeqCUSP; V->ops->conjugate = VecConjugate_SeqCUSP; ierr = VecCUSPAllocateCheck(V);CHKERRQ(ierr); V->valid_GPU_array = PETSC_CUSP_GPU; ierr = VecSet(V,0.0);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayRead(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayRead(Vec v, CUSPARRAY **a) { PetscFunctionBegin; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = 
VecCUSPAllocateCheck(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); }
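A minimal usage sketch, assuming a PETSc build of this era configured with CUSP support: all of the operations registered in VecCreate_SeqCUSP() above are reached through the ordinary PETSc vector interface, so a driver program needs nothing CUSP-specific beyond selecting the VECSEQCUSP type (here via VecCreateSeqCUSP(); -vec_type seqcusp with VecSetFromOptions() works as well). The vector length and the particular operations below are illustrative choices, not taken from the file itself.

/* Minimal usage sketch, assuming a PETSc build with CUSP support enabled;
   error handling is abbreviated to CHKERRQ. */
#include <petscvec.h>

int main(int argc, char **argv)
{
  Vec            x, y;
  PetscReal      nrm;
  PetscScalar    dot;
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc, &argv, NULL, NULL);CHKERRQ(ierr);
  ierr = VecCreateSeqCUSP(PETSC_COMM_SELF, 1000, &x);CHKERRQ(ierr); /* creates a VECSEQCUSP vector */
  ierr = VecDuplicate(x, &y);CHKERRQ(ierr);                         /* dispatches to VecDuplicate_SeqCUSP */
  ierr = VecSet(x, 1.0);CHKERRQ(ierr);                              /* VecSet_SeqCUSP (cusp::blas::fill) */
  ierr = VecSet(y, 2.0);CHKERRQ(ierr);
  ierr = VecAXPY(y, 3.0, x);CHKERRQ(ierr);                          /* VecAXPY_SeqCUSP (cusp::blas::axpy) */
  ierr = VecNorm(y, NORM_2, &nrm);CHKERRQ(ierr);                    /* VecNorm_SeqCUSP (cusp::blas::nrm2) */
  ierr = VecDot(x, y, &dot);CHKERRQ(ierr);                          /* VecDot_SeqCUSP */
  ierr = PetscPrintf(PETSC_COMM_SELF, "||y|| = %g\n", (double)nrm);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return ierr;
}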
768a6b96cd2c84fa3df6618cf9ca5ce08d2d43c3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

// Single-block sum reduction: each of the 256 threads accumulates a contiguous
// 256-element stripe of x, stages its partial sum in dynamic shared memory, and
// thread 0 adds the partials into z[0]. Only correct for n <= 256*256; elements
// past that bound are silently ignored.
__global__ void sum_kernel(int n, const float *x, float *z) {
  extern __shared__ float sdata[];
  int offset = threadIdx.x * 256;
  float total = 0;
  for (int i = 0; i < 256; ++i) {
    if (offset + i < n) {
      total += x[offset + i];
    }
  }
  sdata[threadIdx.x] = total;  // publish this thread's partial sum
  __syncthreads();
  if (offset == 0) {           // only threadIdx.x == 0 performs the final pass
    float ttotal = 0;
    for (int i = 0; i < 256; ++i) {
      ttotal += sdata[i];
    }
    z[0] = ttotal;
  }
}

// Host wrapper: one block of 256 threads with 256 floats of dynamic shared
// memory, launched on the default (null) stream.
void sum(int n, const float *x, float *z) {
  hipLaunchKernelGGL((sum_kernel), dim3(1), dim3(256), 256 * sizeof(float), 0, n, x, z);
}
768a6b96cd2c84fa3df6618cf9ca5ce08d2d43c3.cu
// Single-block sum reduction: each of the 256 threads accumulates a contiguous
// 256-element stripe of x, stages its partial sum in dynamic shared memory, and
// thread 0 adds the partials into z[0]. Only correct for n <= 256*256; elements
// past that bound are silently ignored.
__global__ void sum_kernel(int n, const float *x, float *z) {
  extern __shared__ float sdata[];
  int offset = threadIdx.x * 256;
  float total = 0;
  for (int i = 0; i < 256; ++i) {
    if (offset + i < n) {
      total += x[offset + i];
    }
  }
  sdata[threadIdx.x] = total;  // publish this thread's partial sum
  __syncthreads();
  if (offset == 0) {           // only threadIdx.x == 0 performs the final pass
    float ttotal = 0;
    for (int i = 0; i < 256; ++i) {
      ttotal += sdata[i];
    }
    z[0] = ttotal;
  }
}

// Host wrapper: one block of 256 threads with 256 floats of dynamic shared
// memory, launched on the default stream.
void sum(int n, const float *x, float *z) {
  sum_kernel<<<1, 256, 256 * sizeof(float)>>>(n, x, z);
}
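For reference, the sum() wrapper above (identical in the HIP and CUDA versions apart from the launch syntax) would typically be driven from host code along the following lines. This is only a sketch under stated assumptions: the forward declaration, buffer sizes, and the explicit cudaDeviceSynchronize() are illustrative choices, not part of the original pair; a HIP build would use the corresponding hipMalloc/hipMemcpy/hipDeviceSynchronize calls.

// Host-side driver sketch (assumed, not from the original file): allocates device
// buffers, runs sum(), and copies the scalar result back to the host.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

void sum(int n, const float *x, float *z);  // wrapper defined in the file above

int main() {
  const int n = 1000;                     // must stay <= 256*256 for this kernel
  std::vector<float> h_x(n, 1.0f);        // all ones, so the expected sum is n
  float *d_x = nullptr, *d_z = nullptr;

  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_z, sizeof(float));
  cudaMemcpy(d_x, h_x.data(), n * sizeof(float), cudaMemcpyHostToDevice);

  sum(n, d_x, d_z);                       // launches sum_kernel on the default stream
  cudaDeviceSynchronize();

  float h_z = 0.0f;
  cudaMemcpy(&h_z, d_z, sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("sum = %f (expected %d)\n", h_z, n);

  cudaFree(d_x);
  cudaFree(d_z);
  return 0;
}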
ff99fe1cd028248d6923def3eab5233f6e58fe45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <raft/random/rng.cuh> #include <selection/kselection.cuh> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK* key) { KVArray<TypeV, TypeK, N, Greater> arr; #pragma unroll for (int i = 0; i < N; ++i) { arr.arr[i].val = (TypeV)raft::laneId(); arr.arr[i].key = (TypeK)raft::laneId(); } raft::warpFence(); arr.sort(); raft::warpFence(); #pragma unroll for (int i = 0; i < N; ++i) arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK* key) { rmm::device_uvector<TypeK> dkey(TPB * N); hipLaunchKernelGGL(( sortTestKernel<TypeV, TypeK, N, TPB, Greater>), dim3(1), dim3(TPB), 0, 0, dkey.data()); CUDA_CHECK(hipPeekAtLastError()); raft::update_host<TypeK>(key, dkey.data(), TPB * N, 0); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV, typename TypeK, bool Greater> int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) { if (Greater == 0) { return a.val > b.val; } else { return a.val < b.val; } } template <typename TypeV, typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV, TypeK>* arr, int N, int k) { std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, int N, bool Greater> void sortKVArray(KVArray<TypeV, TypeK, N, Greater>& arr) { std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, bool Greater> ::testing::AssertionResult checkResult( TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows, int N, int k, TypeV tolerance) { for (int rIndex = 0; rIndex < rows; rIndex++) { // input data TypeV* h_arr = new TypeV[N]; raft::update_host(h_arr, d_arr + rIndex * N, N, rmm::cuda_stream_default); KVPair<TypeV, TypeK>* topk = new KVPair<TypeV, TypeK>[N]; for (int j = 0; j < N; j++) { topk[j].val = h_arr[j]; topk[j].key = j; } // result reference TypeV* h_outv = new TypeV[k]; raft::update_host(h_outv, d_outv + rIndex * k, k, rmm::cuda_stream_default); TypeK* h_outk = new TypeK[k]; raft::update_host(h_outk, d_outk + rIndex * k, k, rmm::cuda_stream_default); // calculate the result partSortKVPair<TypeV, TypeK, Greater>(topk, N, k); // check result for (int j = 0; j < k; j++) { // std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if (abs(h_outv[j] - topk[j].val) > tolerance) { return ::testing::AssertionFailure() << 
"actual=" << topk[j].val << " != expected=" << h_outv[j]; } } // delete resource delete[] h_arr; delete[] h_outv; delete[] h_outk; delete[] topk; } return ::testing::AssertionSuccess(); } // Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; // batch size int cols; // N the length of variables int k; // the top-k value unsigned long long int seed; // seed to generate data }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims) { return os; } // Define functions WarpTopKTest template <typename T> class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> { protected: WarpTopKTest() : arr(0, stream), outv(0, stream), outk(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); raft::random::Rng r(params.seed); CUDA_CHECK(hipStreamCreate(&stream)); arr.resize(params.rows * params.cols, stream); outk.resize(params.rows * params.k, stream); outv.resize(params.rows * params.k, stream); r.uniform(arr.data(), params.rows * params.cols, T(-1.0), T(1.0), stream); static const bool Sort = false; static const bool Greater = true; warpTopK<T, int, Greater, Sort>( outv.data(), outk.data(), arr.data(), params.k, params.rows, params.cols, stream); CUDA_CHECK(hipStreamDestroy(stream)); } protected: hipStream_t stream = 0; WarpTopKInputs<T> params; rmm::device_uvector<T> arr; rmm::device_uvector<T> outv; rmm::device_uvector<int> outk; }; // Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float>> inputs2_0 = {{0.00000001, 2, 1024, 256, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_1 = {{0.00000001, 4, 2048, 1024, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_2 = {{0.00000001, 4, 2048, 1, 1234ULL}}; // Milestone 2.2: Using the full thread queue and warp queue code to support // all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu // Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_1, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_2, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
ff99fe1cd028248d6923def3eab5233f6e58fe45.cu
/* * Copyright (c) 2018-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <stdlib.h> #include <algorithm> #include <limits> #include <raft/random/rng.cuh> #include <selection/kselection.cuh> namespace MLCommon { namespace Selection { template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> __global__ void sortTestKernel(TypeK* key) { KVArray<TypeV, TypeK, N, Greater> arr; #pragma unroll for (int i = 0; i < N; ++i) { arr.arr[i].val = (TypeV)raft::laneId(); arr.arr[i].key = (TypeK)raft::laneId(); } raft::warpFence(); arr.sort(); raft::warpFence(); #pragma unroll for (int i = 0; i < N; ++i) arr.arr[i].store(nullptr, key + threadIdx.x + i * TPB); } template <typename TypeV, typename TypeK, int N, int TPB, bool Greater> void sortTest(TypeK* key) { rmm::device_uvector<TypeK> dkey(TPB * N); sortTestKernel<TypeV, TypeK, N, TPB, Greater><<<1, TPB>>>(dkey.data()); CUDA_CHECK(cudaPeekAtLastError()); raft::update_host<TypeK>(key, dkey.data(), TPB * N, 0); } /************************************************************************/ /********************** Add the function for CPU test *******************/ /************************************************************************/ template <typename TypeV, typename TypeK, bool Greater> int cmp(KVPair<TypeV, TypeK> a, KVPair<TypeV, TypeK> b) { if (Greater == 0) { return a.val > b.val; } else { return a.val < b.val; } } template <typename TypeV, typename TypeK, bool Greater> void partSortKVPair(KVPair<TypeV, TypeK>* arr, int N, int k) { std::partial_sort(arr, arr + k, arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, int N, bool Greater> void sortKVArray(KVArray<TypeV, TypeK, N, Greater>& arr) { std::sort(arr.arr, arr.arr + N, cmp<TypeV, TypeK, Greater>); } template <typename TypeV, typename TypeK, bool Greater> ::testing::AssertionResult checkResult( TypeV* d_arr, TypeV* d_outv, TypeK* d_outk, int rows, int N, int k, TypeV tolerance) { for (int rIndex = 0; rIndex < rows; rIndex++) { // input data TypeV* h_arr = new TypeV[N]; raft::update_host(h_arr, d_arr + rIndex * N, N, rmm::cuda_stream_default); KVPair<TypeV, TypeK>* topk = new KVPair<TypeV, TypeK>[N]; for (int j = 0; j < N; j++) { topk[j].val = h_arr[j]; topk[j].key = j; } // result reference TypeV* h_outv = new TypeV[k]; raft::update_host(h_outv, d_outv + rIndex * k, k, rmm::cuda_stream_default); TypeK* h_outk = new TypeK[k]; raft::update_host(h_outk, d_outk + rIndex * k, k, rmm::cuda_stream_default); // calculate the result partSortKVPair<TypeV, TypeK, Greater>(topk, N, k); // check result for (int j = 0; j < k; j++) { // std::cout<<"Get value at ("<<rIndex<<" "<<j<<") Cpu " // <<topk[j].val<<" "<<topk[j].key<<" Gpu "<<h_outv[j]<<" " //<<h_outk[j] <<std::endl<<std::endl; if (abs(h_outv[j] - topk[j].val) > tolerance) { return ::testing::AssertionFailure() << "actual=" << topk[j].val << " != expected=" << h_outv[j]; } } // delete resource delete[] h_arr; delete[] h_outv; delete[] 
h_outk; delete[] topk; } return ::testing::AssertionSuccess(); } // Structure WarpTopKInputs template <typename T> struct WarpTopKInputs { T tolerance; int rows; // batch size int cols; // N the length of variables int k; // the top-k value unsigned long long int seed; // seed to generate data }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const WarpTopKInputs<T>& dims) { return os; } // Define functions WarpTopKTest template <typename T> class WarpTopKTest : public ::testing::TestWithParam<WarpTopKInputs<T>> { protected: WarpTopKTest() : arr(0, stream), outv(0, stream), outk(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<WarpTopKInputs<T>>::GetParam(); raft::random::Rng r(params.seed); CUDA_CHECK(cudaStreamCreate(&stream)); arr.resize(params.rows * params.cols, stream); outk.resize(params.rows * params.k, stream); outv.resize(params.rows * params.k, stream); r.uniform(arr.data(), params.rows * params.cols, T(-1.0), T(1.0), stream); static const bool Sort = false; static const bool Greater = true; warpTopK<T, int, Greater, Sort>( outv.data(), outk.data(), arr.data(), params.k, params.rows, params.cols, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: cudaStream_t stream = 0; WarpTopKInputs<T> params; rmm::device_uvector<T> arr; rmm::device_uvector<T> outv; rmm::device_uvector<int> outk; }; // Parameters // Milestone 1: Verify the result of current implementation // Milestone 2: Support all the values of k between 1 and 1024; both inclusive // Milestone 2.1: Using the POC code to Support all the values const std::vector<WarpTopKInputs<float>> inputs2_0 = {{0.00000001, 2, 1024, 256, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_1 = {{0.00000001, 4, 2048, 1024, 1234ULL}}; const std::vector<WarpTopKInputs<float>> inputs2_2 = {{0.00000001, 4, 2048, 1, 1234ULL}}; // Milestone 2.2: Using the full thread queue and warp queue code to support // all the values // @TODO: Milestone 3: Support not sorted // @TODO: Milestone 4: Support multi-gpu // Define the function TEST_P typedef WarpTopKTest<float> TestD2_0; typedef WarpTopKTest<float> TestD2_1; typedef WarpTopKTest<float> TestD2_2; TEST_P(TestD2_0, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_1, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } TEST_P(TestD2_2, Result) { const static bool Greater = true; ASSERT_TRUE((checkResult<float, int, Greater>( arr.data(), outv.data(), outk.data(), params.rows, params.cols, params.k, params.tolerance))); } // Instantiate INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_0, ::testing::ValuesIn(inputs2_0)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_1, ::testing::ValuesIn(inputs2_1)); INSTANTIATE_TEST_CASE_P(WarpTopKTests, TestD2_2, ::testing::ValuesIn(inputs2_2)); } // end namespace Selection } // end namespace MLCommon
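Seen side by side, the .hip and .cu versions of this test differ only in runtime-API prefixes (hipStreamCreate vs cudaStreamCreate, hipPeekAtLastError vs cudaPeekAtLastError) and in launch syntax. A minimal sketch of that launch mapping, with hypothetical names (scaleKernel, launchScale) that do not come from the files above:

#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // one element per thread
  if (i < n) data[i] *= factor;
}

void launchScale(float* d_data, float factor, int n, hipStream_t stream) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);
  // CUDA source form:   scaleKernel<<<grid, block, 0, stream>>>(d_data, factor, n);
  // hipify output form:
  hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_data, factor, n);
}

The kernel body itself is left untouched by the translation; only host-side launches and cuda*-prefixed runtime calls are rewritten.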
218701a3c110f9c5327d9c63ab7ba0c1cf11df9e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/softmax_focal_loss_op.cu // Jiajun Tang // [email protected] #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SpatialSoftmaxForward(const int nthreads, const T* Xdata, T* Pdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { // Subtract max on each cell for numerical reasons T max_val = -FLT_MAX; for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ max_val = max(max_val, Xdata[c]); } // Exponentiate T expsum = 0.0; for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ T expx = exp(Xdata[c] - max_val); Pdata[c] = expx; expsum += expx; } // Normalize for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ Pdata[c] /= expsum; } } } template <typename T> __global__ void SoftmaxFocalLossForward(const int nthreads, const T* Pdata, const int* targets, const int num_classes, const float gamma, const float alpha, T* losses) { CUDA_1D_KERNEL_LOOP(n, nthreads) { const int label = static_cast<int>(targets[n]); // alpha flag. T af1 = (alpha >= 0); T af2 = (1.0 - af1); T z = ((label == 0) * (1 - alpha) + (label >= 1) * alpha) * af1 + af2; losses[n] = 0.0; if (label >= 0){ int idx = n * num_classes + label; losses[n] = -(pow(1.0 - Pdata[idx], gamma) * log(max(Pdata[idx], FLT_MIN))) * z; } } // CUDA_1D_KERNEL_LOOP } // SoftmaxFocalLossForward template <typename T> __global__ void SoftmaxFocalLossBackwardWeight(const int nthreads, const T* Pdata, const int* targets, const int num_classes, const float gamma, const float alpha, T* buff) { CUDA_1D_KERNEL_LOOP(n, nthreads) { const int label = static_cast<int>(targets[n]); // alpha flag. T af1 = (alpha >= 0); T af2 = (1.0 - af1); T z = ((label == 0) * (1 - alpha) + (label >= 1) * alpha) * af1 + af2; buff[n] = 0.0; if (label >= 0) { int idx = n * num_classes + label; T onemp = 1. 
- Pdata[idx]; T p = Pdata[idx]; buff[n] = (-pow(onemp, gamma) + gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z; } } } template <typename T> __global__ void SoftmaxFocalLossBackward(const int nthreads, const T* Pdata, const int* targets, const T* d_losses, const T* buff, const int num_classes, const float gamma, const float alpha, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int c = i % num_classes; T d_loss = d_losses[n]; const int label = static_cast<int>(targets[n]); T c1 = (label >= 0) * 1.0; T c2 = (label == c) * 1.0; d_logits[i] = c1 * d_loss * buff[n] * (c2 - Pdata[i]); } // CUDA_1D_KERNEL_LOOP } // SoftmaxFocalLossBackward std::tuple<at::Tensor, at::Tensor> SoftmaxFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const float gamma, const float alpha) { AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.dim() == 1, "targets should be N"); AT_ASSERTM(logits.size(0) == targets.size(0), "dim(0) of targets should be the same as dim(0) of logits."); const int num_samples = logits.size(0); const int num_classes = logits.size(1); auto losses = at::empty({num_samples}, logits.options()); auto losses_size = static_cast<long>(num_samples); auto P = at::empty({num_samples, num_classes}, logits.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv(losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(losses, P); } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SpatialSoftmax_forward", [&] { hipLaunchKernelGGL(( SpatialSoftmaxForward<scalar_t>), dim3(grid), dim3(block), 0, stream, losses_size, logits.contiguous().data_ptr<scalar_t>(), P.data_ptr<scalar_t>(), num_classes); }); AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_forward", [&] { hipLaunchKernelGGL(( SoftmaxFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, stream, losses_size, P.data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, losses.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return std::make_tuple(losses, P); } at::Tensor SoftmaxFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& P, const at::Tensor& d_losses, const float gamma, const float alpha) { AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.dim() == 1, "targets should be N"); AT_ASSERTM(logits.size(0) == targets.size(0), "dim(0) of targets should be the same as dim(0) of logits."); const int num_samples = logits.size(0); const int num_classes = logits.size(1); auto buff = at::zeros({num_samples},logits.options()); auto buff_size = static_cast<long>(num_samples); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = static_cast<long>(num_samples) * num_classes; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid1(::min(THCCeilDiv(buff_size, 512L), 4096L)); dim3 grid2(::min(THCCeilDiv(d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } 
AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backwardWeight", [&] { hipLaunchKernelGGL(( SoftmaxFocalLossBackwardWeight<scalar_t>), dim3(grid1), dim3(block), 0, stream, buff_size, P.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, buff.data_ptr<scalar_t>()); }); AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backward", [&] { hipLaunchKernelGGL(( SoftmaxFocalLossBackward<scalar_t>), dim3(grid2), dim3(block), 0, stream, d_logits_size, P.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), d_losses.contiguous().data_ptr<scalar_t>(), buff.data_ptr<scalar_t>(), num_classes, gamma, alpha, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; }
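The focal-loss kernels above iterate with the CUDA_1D_KERNEL_LOOP macro, a grid-stride loop, and are launched with a grid capped at min(ceil(n/512), 4096) blocks of 512 threads. A minimal sketch of that pattern, assuming HIP and hypothetical names (fillKernel, fill) not taken from the file: the stride loop lets the capped grid cover any n.

#include <algorithm>
#include <hip/hip_runtime.h>

__global__ void fillKernel(float* out, float value, int n) {
  // grid-stride loop: same shape as CUDA_1D_KERNEL_LOOP above
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    out[i] = value;
  }
}

void fill(float* d_out, float value, int n, hipStream_t stream) {
  // cap the grid as the launches above do; the stride loop covers the rest
  long blocks = std::min((static_cast<long>(n) + 511L) / 512L, 4096L);
  if (blocks < 1) blocks = 1;
  hipLaunchKernelGGL(fillKernel, dim3(static_cast<unsigned int>(blocks)),
                     dim3(512), 0, stream, d_out, value, n);
}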
218701a3c110f9c5327d9c63ab7ba0c1cf11df9e.cu
// This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/softmax_focal_loss_op.cu // Jiajun Tang // [email protected] #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SpatialSoftmaxForward(const int nthreads, const T* Xdata, T* Pdata, const int num_classes) { CUDA_1D_KERNEL_LOOP(i, nthreads) { // Subtract max on each cell for numerical reasons T max_val = -FLT_MAX; for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ max_val = max(max_val, Xdata[c]); } // Exponentiate T expsum = 0.0; for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ T expx = exp(Xdata[c] - max_val); Pdata[c] = expx; expsum += expx; } // Normalize for(int c = i * num_classes; c < (i + 1) * num_classes; ++c){ Pdata[c] /= expsum; } } } template <typename T> __global__ void SoftmaxFocalLossForward(const int nthreads, const T* Pdata, const int* targets, const int num_classes, const float gamma, const float alpha, T* losses) { CUDA_1D_KERNEL_LOOP(n, nthreads) { const int label = static_cast<int>(targets[n]); // alpha flag. T af1 = (alpha >= 0); T af2 = (1.0 - af1); T z = ((label == 0) * (1 - alpha) + (label >= 1) * alpha) * af1 + af2; losses[n] = 0.0; if (label >= 0){ int idx = n * num_classes + label; losses[n] = -(pow(1.0 - Pdata[idx], gamma) * log(max(Pdata[idx], FLT_MIN))) * z; } } // CUDA_1D_KERNEL_LOOP } // SoftmaxFocalLossForward template <typename T> __global__ void SoftmaxFocalLossBackwardWeight(const int nthreads, const T* Pdata, const int* targets, const int num_classes, const float gamma, const float alpha, T* buff) { CUDA_1D_KERNEL_LOOP(n, nthreads) { const int label = static_cast<int>(targets[n]); // alpha flag. T af1 = (alpha >= 0); T af2 = (1.0 - af1); T z = ((label == 0) * (1 - alpha) + (label >= 1) * alpha) * af1 + af2; buff[n] = 0.0; if (label >= 0) { int idx = n * num_classes + label; T onemp = 1. 
- Pdata[idx]; T p = Pdata[idx]; buff[n] = (-pow(onemp, gamma) + gamma * pow(onemp, gamma - 1) * p * log(max(p, FLT_MIN))) * z; } } } template <typename T> __global__ void SoftmaxFocalLossBackward(const int nthreads, const T* Pdata, const int* targets, const T* d_losses, const T* buff, const int num_classes, const float gamma, const float alpha, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int c = i % num_classes; T d_loss = d_losses[n]; const int label = static_cast<int>(targets[n]); T c1 = (label >= 0) * 1.0; T c2 = (label == c) * 1.0; d_logits[i] = c1 * d_loss * buff[n] * (c2 - Pdata[i]); } // CUDA_1D_KERNEL_LOOP } // SoftmaxFocalLossBackward std::tuple<at::Tensor, at::Tensor> SoftmaxFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const float gamma, const float alpha) { AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.dim() == 1, "targets should be N"); AT_ASSERTM(logits.size(0) == targets.size(0), "dim(0) of targets should be the same as dim(0) of logits."); const int num_samples = logits.size(0); const int num_classes = logits.size(1); auto losses = at::empty({num_samples}, logits.options()); auto losses_size = static_cast<long>(num_samples); auto P = at::empty({num_samples, num_classes}, logits.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv(losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(losses, P); } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SpatialSoftmax_forward", [&] { SpatialSoftmaxForward<scalar_t><<<grid, block, 0, stream>>>( losses_size, logits.contiguous().data_ptr<scalar_t>(), P.data_ptr<scalar_t>(), num_classes); }); AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_forward", [&] { SoftmaxFocalLossForward<scalar_t><<<grid, block, 0, stream>>>( losses_size, P.data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, losses.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(losses, P); } at::Tensor SoftmaxFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& P, const at::Tensor& d_losses, const float gamma, const float alpha) { AT_ASSERTM(logits.is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); AT_ASSERTM(targets.dim() == 1, "targets should be N"); AT_ASSERTM(logits.size(0) == targets.size(0), "dim(0) of targets should be the same as dim(0) of logits."); const int num_samples = logits.size(0); const int num_classes = logits.size(1); auto buff = at::zeros({num_samples},logits.options()); auto buff_size = static_cast<long>(num_samples); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = static_cast<long>(num_samples) * num_classes; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid1(std::min(THCCeilDiv(buff_size, 512L), 4096L)); dim3 grid2(std::min(THCCeilDiv(d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backwardWeight", [&] { 
SoftmaxFocalLossBackwardWeight<scalar_t><<<grid1, block, 0, stream>>>( buff_size, P.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), num_classes, gamma, alpha, buff.data_ptr<scalar_t>()); }); AT_DISPATCH_FLOATING_TYPES(logits.scalar_type(), "SoftmaxFocalLoss_backward", [&] { SoftmaxFocalLossBackward<scalar_t><<<grid2, block, 0, stream>>>( d_logits_size, P.contiguous().data_ptr<scalar_t>(), targets.contiguous().data_ptr<int>(), d_losses.contiguous().data_ptr<scalar_t>(), buff.data_ptr<scalar_t>(), num_classes, gamma, alpha, d_logits.data_ptr<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; }
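The forward path in both files above is a per-sample softmax (SpatialSoftmaxForward) followed by the focal-loss term -z * (1 - p_label)^gamma * log(p_label), where z is alpha for positive labels, (1 - alpha) for label 0, and 1 when alpha < 0; samples with a negative label contribute zero. A hedged host-only reference of that math, with an illustrative name (softmaxFocalLossRef) not taken from the files:

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

float softmaxFocalLossRef(const std::vector<float>& logits, int label,
                          float gamma, float alpha) {
  // softmax with max-subtraction, as in SpatialSoftmaxForward
  float max_val = *std::max_element(logits.begin(), logits.end());
  std::vector<float> p(logits.size());
  float expsum = 0.0f;
  for (size_t c = 0; c < logits.size(); ++c) {
    p[c] = std::exp(logits[c] - max_val);
    expsum += p[c];
  }
  for (float& v : p) v /= expsum;

  if (label < 0) return 0.0f;  // ignored sample, as in the kernel
  float z = (alpha >= 0.0f) ? ((label == 0) ? (1.0f - alpha) : alpha) : 1.0f;
  float pt = std::max(p[label], FLT_MIN);
  return -std::pow(1.0f - pt, gamma) * std::log(pt) * z;
}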
4ae99bdc56567145d7cd87d5b17d4c9327c34b43.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Modified by Nuttiiya Seekhao to support volume rendering of float value // from main memory // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <assert.h> #include <helper_cuda.h> #include <helper_math.h> #include "../../enums.h" // Note: Originally, including common_vis worked. However, it stopped working due to GLX inclusion. // TODO: Separate GLX includes and definitions #include "../../common.h" //#include "./common_vis.h" //#include "./VolumeManager.h" typedef unsigned int uint; typedef unsigned char uchar; //typedef unsigned char VolumeType; typedef float VolumeType; VolumeType *d_svBuffer[m_ecmtotal] = {0}; //hipArray *d_svArray[m_ecmtotal] = {0}; hipArray *d_volumeArray[m_ecmtotal] = {0}; #ifdef ECV_SAMPLE_CHEM hipArray *d_chemsample_h[TOTAL_CHEM]; #endif hipArray *d_transferFuncArrayCol = {0}; hipArray *d_transferFuncArrayEla = {0}; hipArray *d_transferFuncArrayHya = {0}; #ifdef AVEP surface<void, cudaSurfaceType3D> srfCol; surface<void, cudaSurfaceType3D> srfEla; surface<void, cudaSurfaceType3D> srfHya; #endif // AVEP texture<VolumeType, 3, hipReadModeElementType> texCol; texture<VolumeType, 3, hipReadModeElementType> texEla; texture<VolumeType, 3, hipReadModeElementType> texHya; //texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, hipReadModeElementType> transferTexCol; // 1D transfer function texture texture<float4, 1, hipReadModeElementType> transferTexEla; texture<float4, 1, hipReadModeElementType> transferTexHya; #ifdef ECV_SAMPLE_CHEM texture<VolumeType, 3, hipReadModeElementType> texChem0; texture<VolumeType, 3, hipReadModeElementType> texChem1; texture<VolumeType, 3, hipReadModeElementType> texChem2; texture<VolumeType, 3, hipReadModeElementType> texChem3; texture<VolumeType, 3, hipReadModeElementType> texChem4; texture<VolumeType, 3, hipReadModeElementType> texChem5; texture<VolumeType, 3, hipReadModeElementType> texChem6; texture<VolumeType, 3, hipReadModeElementType> texChem7; surface<void, cudaSurfaceType3D> srfChem0; surface<void, cudaSurfaceType3D> srfChem1; surface<void, cudaSurfaceType3D> srfChem2; surface<void, cudaSurfaceType3D> srfChem3; surface<void, cudaSurfaceType3D> srfChem4; surface<void, cudaSurfaceType3D> srfChem5; surface<void, cudaSurfaceType3D> srfChem6; surface<void, cudaSurfaceType3D> srfChem7; texture<float4, 1, hipReadModeElementType> transferTexChem0; texture<float4, 1, hipReadModeElementType> transferTexChem1; texture<float4, 1, hipReadModeElementType> transferTexChem2; texture<float4, 1, hipReadModeElementType> transferTexChem3; texture<float4, 1, hipReadModeElementType> transferTexChem4; texture<float4, 1, hipReadModeElementType> transferTexChem5; texture<float4, 1, hipReadModeElementType> transferTexChem6; texture<float4, 1, hipReadModeElementType> transferTexChem7; hipArray *d_transferFuncArrayChem0 = {0}; hipArray *d_transferFuncArrayChem1 = {0}; hipArray *d_transferFuncArrayChem2 = {0}; hipArray *d_transferFuncArrayChem3 = {0}; hipArray *d_transferFuncArrayChem4 = {0}; hipArray 
*d_transferFuncArrayChem5 = {0}; hipArray *d_transferFuncArrayChem6 = {0}; hipArray *d_transferFuncArrayChem7 = {0}; #endif // ECV_SAMPLE_CHEM typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm //Round a / b to nearest higher integer value int iDivUp_AVEP(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } #ifdef ECV_SAMPLE_CHEM #ifdef ECV_SAMPLE_CHEM_TEST __device__ float smult[TOTAL_CHEM] = { 50000.0f, 10000.0f, 1000000.0f, 100.0f, 10000.0f, 100000.0f, 100000.0f, 10000.0f }; // sample multiplier __global__ void sampleChem_kernel( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; const bool validZ = (0 <= z) && (z < dataD); const bool validY = (0 <= y) && (y < dataH); const bool validX = (0 <= x) && (x < dataW); const bool validZ_h = validZ && (z%ECV_SAMPLE_STRIDE_HGH == 0); const bool validY_h = validY && (y%ECV_SAMPLE_STRIDE_HGH == 0); const bool validX_h = validX && (x%ECV_SAMPLE_STRIDE_HGH == 0); const bool validZ_l = validZ && (z%ECV_SAMPLE_STRIDE_LOW == 0); const bool validY_l = validY && (y%ECV_SAMPLE_STRIDE_LOW == 0); const bool validX_l = validX && (x%ECV_SAMPLE_STRIDE_LOW == 0); const int sampleW_l = dataW / ECV_SAMPLE_STRIDE_LOW; const int sampleH_l = dataH / ECV_SAMPLE_STRIDE_LOW; const int sampleD_l = dataD / ECV_SAMPLE_STRIDE_LOW; const int sampleW_h = dataW / ECV_SAMPLE_STRIDE_HGH; const int sampleH_h = dataH / ECV_SAMPLE_STRIDE_HGH; const int sampleD_h = dataD / ECV_SAMPLE_STRIDE_HGH; int dx_l = x/ECV_SAMPLE_STRIDE_LOW; int dy_l = y/ECV_SAMPLE_STRIDE_LOW; int dz_l = z/ECV_SAMPLE_STRIDE_LOW; int dx_h = x/ECV_SAMPLE_STRIDE_HGH; int dy_h = y/ECV_SAMPLE_STRIDE_HGH; int dz_h = z/ECV_SAMPLE_STRIDE_HGH; const bool validDx_l = (dx_l < sampleW_l); const bool validDy_l = (dy_l < sampleH_l); const bool validDz_l = (dz_l < sampleD_l); const bool validDx_h = (dx_h < sampleW_h); const bool validDy_h = (dy_h < sampleH_h); const bool validDz_h = (dz_h < sampleD_h); if (validZ_h && validY_h && validX_h && validDx_h && validDy_h && validDz_h) { float sample = d_Src[z * dataH * dataW + y * dataW + x] * 50000000.0f;//*smult[chemIndex]; if (sample > 1.0f) sample = 1.0f; // if (sample < 0.01f) sample = 1.0f; surf3Dwrite(sample, srfChem0, dx_h * sizeof(float), dy_h, dz_h); } if (validZ_l && validY_l && validX_l && validDx_l && validDy_l && validDz_l) { float sample = d_Src[z * dataH * dataW + y * dataW + x] * 50000000.0f;//*smult[chemIndex]; if (sample > 1.0f) sample = 1.0f; // if (sample < 0.001f) sample = 1.0f; surf3Dwrite(sample, srfChem2, dx_l * sizeof(float), dy_l, dz_l); } } #else // ECV_SAMPLE_CHEM_TEST __global__ void sampleChem_kernel( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; const bool validZ = (0 <= z) && (z < dataD) && (z%ECV_SAMPLE_STRIDE == 0); const bool validY = (0 <= y) && (y < dataH) && (y%ECV_SAMPLE_STRIDE == 0); const bool validX = (0 <= x) && (x < dataW) && (x%ECV_SAMPLE_STRIDE == 0); // const bool validz = (z >= 0) && (z < datad); // const bool validy = (y >= 0) && (y < datah); // const bool 
validx = (x >= 0) && (x < dataw); const int sampleW = dataW / ECV_SAMPLE_STRIDE; const int sampleH = dataH / ECV_SAMPLE_STRIDE; const int sampleD = dataD / ECV_SAMPLE_STRIDE; int dx = x/ECV_SAMPLE_STRIDE; int dy = y/ECV_SAMPLE_STRIDE; int dz = z/ECV_SAMPLE_STRIDE; const bool validDx = (dx < sampleW); const bool validDy = (dy < sampleH); const bool validDz = (dz < sampleD); if (validZ && validY && validX && validDx && validDy && validDz) { float sample = d_Src[z * dataH * dataW + y * dataW + x]; switch (chemIndex) { case 0: sample *= 50000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem0, dx * sizeof(float), dy, dz); break; case 1: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem1, dx * sizeof(float), dy, dz); break; case 2: sample *= 1000000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem2, dx * sizeof(float), dy, dz); break; case 3: sample *= 100.0f;//5000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem3, dx * sizeof(float), dy, dz); break; case 4: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem4, dx * sizeof(float), dy, dz); break; case 5: sample *= 100000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem5, dx * sizeof(float), dy, dz); break; case 6: sample *= 100000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem6, dx * sizeof(float), dy, dz); break; case 7: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem7, dx * sizeof(float), dy, dz); break; } } } #endif // ECV_SAMPLE_CHEM_TEST extern "C" void sampleChem( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(dataW, threads.x), iDivUp_AVEP(dataH, threads.y), iDivUp_AVEP(dataD, threads.z)); printf(" sampling chem [%dx%dx%d] ...\n", dataW, dataH, dataD); hipLaunchKernelGGL(( sampleChem_kernel), dim3(grid), dim3(threads), 0, 0, d_Src, dataD, dataH, dataW, chemIndex ); getLastCudaError("sampleChem_kernel<<<>>> execution failed\n"); } #endif // ECV_SAMPLE_CHEM #ifdef AVEP #ifdef AVEP_INC __global__ void bufferToVolumeAVEP_round0_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, hipPos offset, ecm_i ecmType) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float sample_dst = 0.0f; switch (ecmType) { case m_col: surf3Dread(&sample_dst, srfCol, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; case m_ela: surf3Dread(&sample_dst, srfEla, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; case m_hya: surf3Dread(&sample_dst, srfHya, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; default: surf3Dread(&sample_dst, srfCol, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; } // write diff to source d_Src[vz*volumeW*volumeH + vy*volumeW + vx] -= sample_dst; if (d_Src[vz*volumeW*volumeH + vy*volumeW + vx] == 0.0f) d_Src[vz*volumeW*volumeH + vy*volumeW + vx] = 0.4f; } } __global__ void bufferToVolumeAVEP_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, hipPos offset, ecm_i ecmType, int incRound, float incFactor) { const int z = blockDim.z * 
blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float multiplier = ((float) (incRound + 1))*incFactor; float sample = 0.0f; switch (ecmType) { case m_col: surf3Dread(&sample, srfCol, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz); break; case m_ela: surf3Dread(&sample, srfEla, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfEla, vx * sizeof(VolumeType), vy, vz); break; case m_hya: surf3Dread(&sample, srfHya, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfHya, vx * sizeof(VolumeType), vy, vz); break; default: surf3Dread(&sample, srfCol, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz); break; } } } extern "C" void bufferToVolumeAVEP( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, hipPos offset, ecm_i ecmType, int incRound, float incFactor) { assert(d_Src != d_Dst); assert(svW <= volumeW); assert(svH <= volumeH); assert(svD <= volumeD); dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(svW, threads.x), iDivUp_AVEP(svH, threads.y), iDivUp_AVEP(svD, threads.z)); if (!incRound) // round 0 { hipLaunchKernelGGL(( bufferToVolumeAVEP_round0_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType ); hipLaunchKernelGGL(( bufferToVolumeAVEP_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType, incRound, incFactor ); } else { hipLaunchKernelGGL(( bufferToVolumeAVEP_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType, incRound, incFactor ); } getLastCudaError("bufferToVolumeAVEP_kernel<<<>>> execution failed\n"); } #else // AVEP_INC __global__ void bufferToVolumeAVEP_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, hipPos offset, ecm_i ecmType) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float sample = d_Src[z*SV_W*SV_H + y*SV_W + x]; // use buffer dimensions switch (ecmType) { case m_col: surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; case m_ela: surf3Dwrite(sample, srfEla, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; case m_hya: surf3Dwrite(sample, srfHya, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; default: surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz);//, hipBoundaryModeZero); break; } } } extern "C" void bufferToVolumeAVEP( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, hipPos offset, ecm_i ecmType) { assert(d_Src != d_Dst); assert(svW 
<= volumeW); assert(svH <= volumeH); assert(svD <= volumeD); dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(svW, threads.x), iDivUp_AVEP(svH, threads.y), iDivUp_AVEP(svD, threads.z)); hipLaunchKernelGGL(( bufferToVolumeAVEP_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType ); getLastCudaError("bufferToVolumeAVEP_kernel<<<>>> execution failed\n"); } #endif // AVEP_INC #endif __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float sample = tex3D(texCol, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D(transferTexCol, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f 
- sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } #ifdef ECV_SAMPLE_CHEM_TEST __global__ void d_render_test_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int chemType, bool isHighRes) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); float sample_chem; float4 col_chem; if (isHighRes) { sample_chem = tex3D(texChem0, posx, posy, posz); } else { // low resolution sample_chem = tex3D(texChem2, posx, posy, posz); } // lookup in transfer function texture switch (chemType) { case 0: col_chem = tex1D(transferTexChem0, (sample_chem-transferOffset)*transferScale); break; case 1: col_chem = tex1D(transferTexChem1, (sample_chem-transferOffset)*transferScale); break; case 2: col_chem = tex1D(transferTexChem2, (sample_chem-transferOffset)*transferScale); break; case 3: col_chem = tex1D(transferTexChem3, (sample_chem-transferOffset)*transferScale); break; case 4: col_chem = tex1D(transferTexChem4, (sample_chem-transferOffset)*transferScale); break; case 5: col_chem = tex1D(transferTexChem5, (sample_chem-transferOffset)*transferScale); break; case 6: col_chem = tex1D(transferTexChem6, (sample_chem-transferOffset)*transferScale); break; case 7: col_chem = tex1D(transferTexChem7, (sample_chem-transferOffset)*transferScale); break; } col_chem.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col_chem.x *= col_chem.w; col_chem.y *= col_chem.w; col_chem.z *= col_chem.w; // "over" operator for front-to-back blending sum = sum + col_chem*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += 
step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } #endif // ECV_SAMPLE_CHEM_TEST __global__ void d_render_sp_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int gpu_id) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); // const float x_halfwidth_chem = x_halfwidth/ECV_SAMPLE_STRIDE; // const float y_halfwidth_chem = y_halfwidth/ECV_SAMPLE_STRIDE; // const float z_halfwidth_chem = z_halfwidth/ECV_SAMPLE_STRIDE; const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); // const float3 boxMin_chem = make_float3(-1.0f*x_halfwidth_chem, -1.0f*y_halfwidth_chem, -1.0f*z_halfwidth_chem); // const float3 boxMax_chem = make_float3( 1.0f*x_halfwidth_chem, 1.0f*y_halfwidth_chem, 1.0f*z_halfwidth_chem); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction using largest dimension as reference #ifdef ECV_SEPARATE float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; #else // ECV_SEPARATE const float ray_ref = (float) max(imageW, imageH); float u = ((float) x / ray_ref)*2.0f - (imageW/ray_ref);//(float) imageW)*2.0f-1.0f; float v = ((float) y / ray_ref)*2.0f - (imageH/ray_ref);//(float) imageH)*2.0f-1.0f; #endif // ECV_SEPARATE // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); float posx_chem = posx;//(pos.x + x_halfwidth_chem)/(2.0f*x_halfwidth_chem); float posy_chem = posy;//(pos.y + y_halfwidth_chem)/(2.0f*y_halfwidth_chem); float posz_chem = posz;//(pos.z + z_halfwidth_chem)/(2.0f*z_halfwidth_chem); float sample_chem0, sample_chem1; float sample_chem2, sample_chem3; float4 col_chem; float4 col_chem0, col_chem1; float4 col_chem2, col_chem3; // float sample; // float4 col; // Assuming 2 GPUs // TODO: GPUs > 2 float blendFactor = 0.5f; if (gpu_id == 0) { sample_chem0 = 10.0f * tex3D(texChem0, posx_chem, posy_chem, posz_chem); sample_chem1 = 1.0f * tex3D(texChem2, posx_chem, posy_chem, posz_chem); sample_chem2 = 1.0f * tex3D(texChem4, posx_chem, posy_chem, posz_chem); sample_chem3 = 1.0f * tex3D(texChem6, posx_chem, posy_chem, 
posz_chem); // lookup in transfer function texture col_chem0 = tex1D(transferTexChem0, (sample_chem0-transferOffset)*transferScale); col_chem1 = tex1D(transferTexChem2, (sample_chem1-transferOffset)*transferScale); col_chem2 = tex1D(transferTexChem4, (sample_chem2-transferOffset)*transferScale); col_chem3 = tex1D(transferTexChem6, (sample_chem3-transferOffset)*transferScale); // blend col_chem.x = 0.25f*col_chem0.x + 0.25f*col_chem1.x + 0.25f*col_chem2.x + 0.25f*col_chem3.x; col_chem.y = 0.25f*col_chem0.y + 0.25f*col_chem1.y + 0.25f*col_chem2.y + 0.25f*col_chem3.y; col_chem.z = 0.25f*col_chem0.z + 0.25f*col_chem1.z + 0.25f*col_chem2.z + 0.25f*col_chem3.z; col_chem.w = 0.25f*col_chem0.w + 0.25f*col_chem1.w + 0.25f*col_chem2.w + 0.25f*col_chem3.w; } else { sample_chem0 = 1.0f * tex3D(texChem1, posx_chem, posy_chem, posz_chem); sample_chem1 = 1.0f * tex3D(texChem3, posx_chem, posy_chem, posz_chem); sample_chem2 = 1.0f * tex3D(texChem5, posx_chem, posy_chem, posz_chem); sample_chem3 = 1.0f * tex3D(texChem7, posx_chem, posy_chem, posz_chem); // lookup in transfer function texture col_chem0 = tex1D(transferTexChem1, (sample_chem0-transferOffset)*transferScale); col_chem1 = tex1D(transferTexChem3, (sample_chem1-transferOffset)*transferScale); col_chem2 = tex1D(transferTexChem5, (sample_chem2-transferOffset)*transferScale); col_chem3 = tex1D(transferTexChem7, (sample_chem3-transferOffset)*transferScale); // blend col_chem.x = 1.0f*col_chem0.x + 1.0f*col_chem1.x + 1.0f*col_chem2.x + 1.0f*col_chem3.x; col_chem.y = 1.0f*col_chem0.y + 1.0f*col_chem1.y + 1.0f*col_chem2.y + 1.0f*col_chem3.y; col_chem.z = 1.0f*col_chem0.z + 1.0f*col_chem1.z + 1.0f*col_chem2.z + 1.0f*col_chem3.z; col_chem.w = 1.0f*col_chem0.w + 1.0f*col_chem1.w + 1.0f*col_chem2.w + 1.0f*col_chem3.w; } col_chem.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col_chem.x *= col_chem.w; col_chem.y *= col_chem.w; col_chem.z *= col_chem.w; // "over" operator for front-to-back blending sum = sum + col_chem*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } __global__ void d_render_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int ecmChemType, bool isChem) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction using largest dimension as reference #ifdef ECV_SEPARATE float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; #else // ECV_SEPARATE const float ray_ref = (float) max(imageW, imageH); float u = ((float) x / ray_ref)*2.0f - (imageW/ray_ref);//(float) 
imageW)*2.0f-1.0f; float v = ((float) y / ray_ref)*2.0f - (imageH/ray_ref);//(float) imageH)*2.0f-1.0f; #endif // ECV_SEPARATE // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); #ifdef ECV_SAMPLE_CHEM #ifdef ECV_INTERLEAVE // float posx_chem = (pos.x + x_halfwidth_chem)/(2.0f*x_halfwidth_chem); // float posy_chem = (pos.y + y_halfwidth_chem)/(2.0f*y_halfwidth_chem); // float posz_chem = (pos.z + z_halfwidth_chem)/(2.0f*z_halfwidth_chem); float sample_chem0, sample_chem2; float sample_chem4, sample_chem6; float4 col_chem; float4 col_chem0, col_chem2; float4 col_chem4, col_chem6; #endif // ECV_INTERLEAVE #endif // ECV_SAMPLE_CHEM float sample; float4 col; if (isChem) { #ifdef ECV_SAMPLE_CHEM switch (ecmChemType) { case 0: sample = tex3D(texChem0, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem0, (sample-transferOffset)*transferScale); break; case 1: sample = tex3D(texChem1, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem1, (sample-transferOffset)*transferScale); break; case 2: sample = tex3D(texChem2, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem2, (sample-transferOffset)*transferScale); break; case 3: sample = tex3D(texChem3, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem3, (sample-transferOffset)*transferScale); break; case 4: sample = tex3D(texChem4, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem4, (sample-transferOffset)*transferScale); break; case 5: sample = tex3D(texChem5, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem5, (sample-transferOffset)*transferScale); break; case 6: sample = tex3D(texChem6, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem6, (sample-transferOffset)*transferScale); break; case 7: sample = tex3D(texChem7, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem7, (sample-transferOffset)*transferScale); break; } #endif // ECV_SAMPLE_CHEM } else { switch (ecmChemType) { case m_col: { sample = tex3D(texCol, posx, posy, posz); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture col = tex1D(transferTexCol, (sample-transferOffset)*transferScale); #ifdef ECV_SAMPLE_CHEM #ifdef ECV_INTERLEAVE float ecmf = 0.6f; float chmf = 1.0f-ecmf; sample_chem0 = 10.0f * tex3D(texChem0, posx, posy, posz); // sample_chem2 = 1.0f * tex3D(texChem2, posx_chem, posy_chem, posz_chem); // sample_chem4 = 1.0f * tex3D(texChem4, posx_chem, posy_chem, posz_chem); // sample_chem6 = 1.0f * tex3D(texChem6, posx_chem, posy_chem, posz_chem); // lookup in transfer function texture col_chem0 = 
tex1D(transferTexChem0, (sample_chem0-transferOffset)*transferScale); // col_chem2 = tex1D(transferTexChem2, (sample_chem2-transferOffset)*transferScale); // col_chem4 = tex1D(transferTexChem4, (sample_chem4-transferOffset)*transferScale); // col_chem6 = tex1D(transferTexChem6, (sample_chem6-transferOffset)*transferScale); // blend // col_chem.x = 0.25f*col_chem0.x + 0.25f*col_chem2.x + 0.25f*col_chem4.x + 0.25f*col_chem6.x; // col_chem.y = 0.25f*col_chem0.y + 0.25f*col_chem2.y + 0.25f*col_chem4.y + 0.25f*col_chem6.y; // col_chem.z = 0.25f*col_chem0.z + 0.25f*col_chem2.z + 0.25f*col_chem4.z + 0.25f*col_chem6.z; // col_chem.w = 0.25f*col_chem0.w + 0.25f*col_chem2.w + 0.25f*col_chem4.w + 0.25f*col_chem6.w; col_chem.x = 1.0f*col_chem0.x;// + 1.0f*col_chem2.x + 1.0f*col_chem4.x + 1.0f*col_chem6.x; col_chem.y = 1.0f*col_chem0.y;// + 1.0f*col_chem2.y + 1.0f*col_chem4.y + 1.0f*col_chem6.y; col_chem.z = 1.0f*col_chem0.z;// + 1.0f*col_chem2.z + 1.0f*col_chem4.z + 1.0f*col_chem6.z; col_chem.w = 1.0f*col_chem0.w;// + 1.0f*col_chem2.w + 1.0f*col_chem4.w + 1.0f*col_chem6.w; // col.x = col_chem.x < 0.1f? chmf*col_chem.x + ecmf*col.x : chmf*col_chem.x + 1.0f*col.x; // col.y = col_chem.y < 0.1f? chmf*col_chem.y + ecmf*col.y : chmf*col_chem.y + 1.0f*col.y; // col.z = col_chem.z < 0.1f? chmf*col_chem.z + ecmf*col.z : chmf*col_chem.z + 1.0f*col.z; // col.w = col_chem.w < 0.1f? chmf*col_chem.w + ecmf*col.w : chmf*col_chem.w + 1.0f*col.w; col.x = chmf*col_chem.x + 1.0f*col.x; col.y = chmf*col_chem.y + 1.0f*col.y; col.z = chmf*col_chem.z + 1.0f*col.z; col.w = chmf*col_chem.w + 1.0f*col.w; #endif // ECV_INTERLEAVE #endif // ECV_SAMPLE_CHEM break; } case m_ela: sample = tex3D(texEla, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexEla, (sample-transferOffset)*transferScale); break; case m_hya: sample = tex3D(texHya, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexHya, (sample-transferOffset)*transferScale); break; } } col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { texCol.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; texEla.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; texHya.filterMode = bLinearFilter ? 
hipFilterModeLinear : hipFilterModePoint; } extern "C" void printCpyParams(hipMemcpy3DParms cp){ /** * struct hipArray *srcArray; struct hipPos srcPos; struct hipPitchedPtr srcPtr; struct hipArray *dstArray; struct hipPos dstPos; struct hipPitchedPtr dstPtr; struct hipExtent extent; enum hipMemcpyKind kind; */ printf("copy params:\n"); printf("\tsrcArray: %p\n", cp.srcArray); printf("\tsrcPos: %d, %d, %d\n", cp.srcPos.x, cp.srcPos.y, cp.srcPos.z); printf("\tsrcPtr:\n"); // if(cp.srcPtr != 0) // { printf("\t\tpitch: %d\n", cp.srcPtr.pitch); printf("\t\tptr: %p\n", cp.srcPtr.ptr); printf("\t\txsize: %d\n", cp.srcPtr.xsize); printf("\t\tysize: %d\n", cp.srcPtr.ysize); // } printf("\tdstArray: %p\n", cp.dstArray); printf("\tdstPos: %d, %d, %d\n", cp.dstPos.x, cp.dstPos.y, cp.dstPos.z); printf("\tdstPtr:\n"); // if(cp.dstPtr != 0) // { printf("\t\tpitch: %d\n", cp.dstPtr.pitch); printf("\t\tptr: %p\n", cp.dstPtr.ptr); printf("\t\txsize: %d\n", cp.dstPtr.xsize); printf("\t\tysize: %d\n", cp.dstPtr.ysize); // } printf("\textent: %d, %d, %d\n", cp.extent.width, cp.extent.height, cp.extent.depth); } #ifdef AVEP #ifdef AVEP_INC extern "C" void bufferECMmapAVEP( hipMemcpy3DParms copyParams, hipMemcpy3DParms svCopyParams, ecm_i ecmType, int incRound, float incFactor) { if (!incRound) // first round { // copy a subvolume from host into device buffer printf("\t\tcopying...\n"); checkCudaErrors(hipMemcpy3D(&svCopyParams)); printf("\t\tdone copying\n"); } // copy data from device buffer to device volume array printf("\t\tbuffering...\n"); VolumeType *d_Src = (VolumeType *) svCopyParams.dstPtr.ptr; VolumeType *d_Dst = (VolumeType *) copyParams.dstArray; bufferToVolumeAVEP( d_Dst, d_Src, (svCopyParams.extent.width)/sizeof(VolumeType), svCopyParams.extent.height, svCopyParams.extent.depth, copyParams.extent.width, copyParams.extent.height, copyParams.extent.depth, svCopyParams.srcPos, ecmType, incRound, incFactor); printf("\t\tdone buffering\n"); } #else // AVEP_INC extern "C" void bufferECMmapAVEP( hipMemcpy3DParms copyParams, hipMemcpy3DParms svCopyParams, ecm_i ecmType) { // copy a subvolume from host into device buffer printf("\t\tcopying...\n"); checkCudaErrors(hipMemcpy3D(&svCopyParams)); printf("\t\tdone copying\n"); // copy data from device buffer to device volume array printf("\t\tbuffering...\n"); VolumeType *d_Src = (VolumeType *) svCopyParams.dstPtr.ptr; VolumeType *d_Dst = (VolumeType *) copyParams.dstArray; bufferToVolumeAVEP( d_Dst, d_Src, (svCopyParams.extent.width)/sizeof(VolumeType), svCopyParams.extent.height, svCopyParams.extent.depth, copyParams.extent.width, copyParams.extent.height, copyParams.extent.depth, svCopyParams.srcPos, ecmType); printf("\t\tdone buffering\n"); } #endif // AVEP_INC #endif // AVEP #ifdef ECV_SAMPLE_CHEM #ifdef ECV_SAMPLE_CHEM_TEST // gets called in DiffusionHelper.cpp extern "C" void initCudaChemSample(hipExtent volumeSize, int chemIndex) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); checkCudaErrors(hipMalloc3DArray(&(d_chemsample_h[chemIndex]), &channelDesc, volumeSize, hipArraySurfaceLoadStore)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); switch (chemIndex) { case 0: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = hipFilterModeLinear; // linear interpolation texChem0.addressMode[0] = hipAddressModeClamp; 
// clamp texture coordinates texChem0.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); // TNF: float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; hipArray *d_transferFuncArrayChem0; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem0.filterMode = hipFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } case 2: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem2, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem2.normalized = true; // access with normalized texture coordinates texChem2.filterMode = hipFilterModeLinear; // linear interpolation texChem2.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem2.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem2, d_chemsample_h[chemIndex], channelDesc)); // TGF: Purple-Turquoise float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.623520, 0.372549, 0.623529, 0.30, }, // 0.10 // purple // { 1.00, 0.32, 0.18, 0.30, }, // bright orange { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 1.00, 0.00, 0.60, }, // 0.60 // yellow { 0.196078, 0.60, 0.80, 0.80, }, // 0.80 // sky blue { 0.439216, 0.858824, 0.576471, 1.00, }, // Turquoise }; hipArray *d_transferFuncArrayChem2; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem2, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem2, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem2.filterMode = hipFilterModeLinear; transferTexChem2.normalized = true; // access with normalized texture coordinates transferTexChem2.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem2, d_transferFuncArrayChem2, channelDesc2)); break; } default: { printf("Chem Resolution Comparison: Wrong buffer index %d\n", chemIndex); exit(-1); } } } #else // ECV_SAMPLE_CHEM_TEST // gets called in DiffusionHelper.cpp extern "C" void initCudaChemSample(hipExtent volumeSize, int chemIndex) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); checkCudaErrors(hipMalloc3DArray(&(d_chemsample_h[chemIndex]), &channelDesc, volumeSize, hipArraySurfaceLoadStore)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); switch (chemIndex) { case 0: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = hipFilterModeLinear; // linear interpolation texChem0.addressMode[0] = hipAddressModeClamp; // clamp texture 
coordinates texChem0.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); // TNF: float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; hipArray *d_transferFuncArrayChem0; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem0.filterMode = hipFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } case 1: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem1, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem1.normalized = true; // access with normalized texture coordinates texChem1.filterMode = hipFilterModeLinear; // linear interpolation texChem1.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem1.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem1, d_chemsample_h[chemIndex], channelDesc)); // TGF: Purple-Turquoise float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.623520, 0.372549, 0.623529, 0.30, }, // 0.10 // purple // { 1.00, 0.32, 0.18, 0.30, }, // bright orange { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 1.00, 0.00, 0.60, }, // 0.60 // yellow { 0.196078, 0.60, 0.80, 0.80, }, // 0.80 // sky blue { 0.439216, 0.858824, 0.576471, 1.00, }, // Turquoise }; hipArray *d_transferFuncArrayChem1; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem1, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem1, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem1.filterMode = hipFilterModeLinear; transferTexChem1.normalized = true; // access with normalized texture coordinates transferTexChem1.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem1, d_transferFuncArrayChem1, channelDesc2)); break; } case 2: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem2, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem2.normalized = true; // access with normalized texture coordinates texChem2.filterMode = hipFilterModeLinear; // linear interpolation texChem2.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem2.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem2, d_chemsample_h[chemIndex], channelDesc)); // FGF: Brown float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.647059, 0.164706, 0.164706, 0.05, }, // 0.05 { 0.647059, 0.164706, 0.164706, 0.10, }, // 0.10 { 0.647059, 0.164706, 0.164706, 0.15, }, // 0.15 { 0.647059, 0.164706, 0.164706, 0.20, }, // 0.20 { 0.647059, 0.164706, 0.164706, 0.25, }, // 0.25 { 0.647059, 0.164706, 0.164706, 0.30, }, // 
0.30 { 0.647059, 0.164706, 0.164706, 0.35, }, // 0.35 { 0.647059, 0.164706, 0.164706, 0.40, }, // 0.40 { 0.647059, 0.164706, 0.164706, 0.45, }, // 0.45 { 0.647059, 0.164706, 0.164706, 0.50, }, // 0.50 { 0.647059, 0.164706, 0.164706, 0.55, }, // 0.55 { 0.647059, 0.164706, 0.164706, 0.60, }, // 0.60 { 0.647059, 0.164706, 0.164706, 0.65, }, // 0.65 { 0.647059, 0.164706, 0.164706, 0.70, }, // 0.70 { 0.647059, 0.164706, 0.164706, 0.75, }, // 0.75 { 0.647059, 0.164706, 0.164706, 0.80, }, // 0.80 { 0.647059, 0.164706, 0.164706, 0.85, }, // 0.85 { 0.647059, 0.164706, 0.164706, 0.90, }, // 0.90 { 0.647059, 0.164706, 0.164706, 0.95, }, // 0.95 { 0.647059, 0.164706, 0.164706, 1.00, }, // 1.00 }; hipArray *d_transferFuncArrayChem2; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem2, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem2, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem2.filterMode = hipFilterModeLinear; transferTexChem2.normalized = true; // access with normalized texture coordinates transferTexChem2.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem2, d_transferFuncArrayChem2, channelDesc2)); break; } case 3: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem3, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem3.normalized = true; // access with normalized texture coordinates texChem3.filterMode = hipFilterModeLinear; // linear interpolation texChem3.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem3.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem3, d_chemsample_h[chemIndex], channelDesc)); // MMP8: Sky blue float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.196078, 0.600000, 0.800000, 0.05, }, // 0.05 { 0.196078, 0.600000, 0.800000, 0.10, }, // 0.10 { 0.196078, 0.600000, 0.800000, 0.15, }, // 0.15 { 0.196078, 0.600000, 0.800000, 0.20, }, // 0.20 { 0.196078, 0.600000, 0.800000, 0.25, }, // 0.25 { 0.196078, 0.600000, 0.800000, 0.30, }, // 0.30 { 0.196078, 0.600000, 0.800000, 0.35, }, // 0.35 { 0.196078, 0.600000, 0.800000, 0.40, }, // 0.40 { 0.196078, 0.600000, 0.800000, 0.45, }, // 0.45 { 0.196078, 0.600000, 0.800000, 0.50, }, // 0.50 { 0.196078, 0.600000, 0.800000, 0.55, }, // 0.55 { 0.196078, 0.600000, 0.800000, 0.60, }, // 0.60 { 0.196078, 0.600000, 0.800000, 0.65, }, // 0.65 { 0.196078, 0.600000, 0.800000, 0.70, }, // 0.70 { 0.196078, 0.600000, 0.800000, 0.75, }, // 0.75 { 0.196078, 0.600000, 0.800000, 0.80, }, // 0.80 { 0.196078, 0.600000, 0.800000, 0.85, }, // 0.85 { 0.196078, 0.600000, 0.800000, 0.90, }, // 0.90 { 0.196078, 0.600000, 0.800000, 0.95, }, // 0.95 { 0.196078, 0.600000, 0.800000, 1.00, }, // 1.00 }; hipArray *d_transferFuncArrayChem3; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem3, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem3, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem3.filterMode = hipFilterModeLinear; transferTexChem3.normalized = true; // access with normalized texture coordinates transferTexChem3.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem3, d_transferFuncArrayChem3, 
channelDesc2)); break; } case 4: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem4, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem4.normalized = true; // access with normalized texture coordinates texChem4.filterMode = hipFilterModeLinear; // linear interpolation texChem4.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem4.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem4, d_chemsample_h[chemIndex], channelDesc)); // IL1: Green Beach from https://digitalsynopsis.com/design/beautiful-color-ui-gradients-backgrounds/ float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.01, 0.67, 0.69, 0.50, }, // blue-mild green { 0.00, 0.80, 0.67, 1.00, }, // light blue-mild green }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayChem4; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem4, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem4, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem4.filterMode = hipFilterModeLinear; transferTexChem4.normalized = true; // access with normalized texture coordinates transferTexChem4.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem4, d_transferFuncArrayChem4, channelDesc2)); break; } case 5: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem5, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem5.normalized = true; // access with normalized texture coordinates texChem5.filterMode = hipFilterModeLinear; // linear interpolation texChem5.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem5.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem5, d_chemsample_h[chemIndex], channelDesc)); // IL6: Pink float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.737255, 0.560784, 0.560784, 0.05, }, // 0.05 { 0.737255, 0.560784, 0.560784, 0.10, }, // 0.10 { 0.737255, 0.560784, 0.560784, 0.15, }, // 0.15 { 0.737255, 0.560784, 0.560784, 0.20, }, // 0.20 { 0.737255, 0.560784, 0.560784, 0.25, }, // 0.25 { 0.737255, 0.560784, 0.560784, 0.30, }, // 0.30 { 0.737255, 0.560784, 0.560784, 0.35, }, // 0.35 { 0.737255, 0.560784, 0.560784, 0.40, }, // 0.40 { 0.737255, 0.560784, 0.560784, 0.45, }, // 0.45 { 0.737255, 0.560784, 0.560784, 0.50, }, // 0.50 { 0.737255, 0.560784, 0.560784, 0.55, }, // 0.55 { 0.737255, 0.560784, 0.560784, 0.60, }, // 0.60 { 0.737255, 0.560784, 0.560784, 0.65, }, // 0.65 { 0.737255, 0.560784, 0.560784, 0.70, }, // 0.70 { 0.737255, 0.560784, 0.560784, 0.75, }, // 0.75 { 0.737255, 0.560784, 0.560784, 0.80, }, // 0.80 { 0.737255, 0.560784, 0.560784, 0.85, }, // 0.85 { 0.737255, 0.560784, 0.560784, 0.90, }, // 0.90 { 0.737255, 0.560784, 0.560784, 0.95, }, // 0.95 { 0.737255, 0.560784, 0.560784, 1.00, }, // 1.00 }; hipArray *d_transferFuncArrayChem5; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem5, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem5, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem5.filterMode = hipFilterModeLinear; transferTexChem5.normalized = true; // access with normalized texture coordinates 
transferTexChem5.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem5, d_transferFuncArrayChem5, channelDesc2)); break; } case 6: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem6, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem6.normalized = true; // access with normalized texture coordinates texChem6.filterMode = hipFilterModeLinear; // linear interpolation texChem6.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem6.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem6, d_chemsample_h[chemIndex], channelDesc)); // IL8: Orange-Yellow float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; hipArray *d_transferFuncArrayChem6; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem6, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem6, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem6.filterMode = hipFilterModeLinear; transferTexChem6.normalized = true; // access with normalized texture coordinates transferTexChem6.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem6, d_transferFuncArrayChem6, channelDesc2)); break; } case 7: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem7, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem7.normalized = true; // access with normalized texture coordinates texChem7.filterMode = hipFilterModeLinear; // linear interpolation texChem7.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem7.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem7, d_chemsample_h[chemIndex], channelDesc)); // IL10: White float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.26, 0.81, 0.64, 0.30, }, // 0.10 // green { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 0.11, 0.68, 0.60, }, // 0.60 { 0.678431, 0.917647, 0.917647, 0.80, }, // 0.80 { 0.00, 0.00, 1.00, 1.00, }, // 1.00 }; hipArray *d_transferFuncArrayChem7; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem7, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem7, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem7.filterMode = hipFilterModeLinear; transferTexChem7.normalized = true; // access with normalized texture coordinates transferTexChem7.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem7, d_transferFuncArrayChem7, channelDesc2)); break; } default: { // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = hipFilterModeLinear; // linear interpolation texChem0.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texChem0.addressMode[1] = hipAddressModeClamp; // 
bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 1.00, 0.00, 1.00, 0.05, }, // 0.05 { 1.00, 0.05, 0.90, 0.10, }, // 0.10 { 0.80, 0.10, 0.80, 0.15, }, // 0.15 { 0.60, 0.15, 0.70, 0.20, }, // 0.20 { 0.40, 0.20, 0.60, 0.25, }, // 0.25 { 0.20, 0.25, 0.50, 0.30, }, // 0.30 { 0.00, 0.30, 0.40, 0.35, }, // 0.35 { 0.40, 0.35, 0.30, 0.40, }, // 0.40 { 0.60, 0.40, 0.20, 0.45, }, // 0.45 { 0.70, 0.45, 0.10, 0.50, }, // 0.50 { 0.80, 0.45, 0.00, 0.55, }, // 0.55 { 0.90, 0.50, 0.00, 0.60, }, // 0.60 { 1.00, 0.50, 0.00, 0.65, }, // 0.65 { 1.00, 0.50, 0.00, 0.70, }, // 0.70 { 1.00, 0.50, 0.00, 0.75, }, // 0.75 { 1.00, 0.50, 0.00, 0.80, }, // 0.80 { 1.00, 0.50, 0.00, 0.85, }, // 0.85 { 1.00, 0.50, 0.00, 0.90, }, // 0.90 { 1.00, 0.50, 0.00, 0.95, }, // 0.95 { 1.00, 0.50, 0.00, 1.00, }, // 1.00 }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayChem0; checkCudaErrors(hipMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexChem0.filterMode = hipFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } } } #endif // ECV_SAMPLE_CHEM_TEST #endif // ECV_SAMPLE_CHEM extern "C" void bufferECMmap(hipMemcpy3DParms copyParams) { checkCudaErrors(hipMemcpy3D(&copyParams)); } extern "C" void initCuda(void *h_volume, hipExtent volumeSize, hipMemcpy3DParms &copyParams, ecm_i ecmType) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); checkCudaErrors(hipMalloc3DArray(&(d_volumeArray[ecmType]), &channelDesc, volumeSize, hipArraySurfaceLoadStore)); // copy data to 3D array copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray[ecmType]; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors(hipMemcpy3D(&copyParams)); //create transfer function texture switch(ecmType) { case m_col: { // Collagen float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.0, }, // 0.00 { 1.00, 0.00, 0.00, 0.5, }, // 0.05 - SLP ILP { 1.00, 0.30, 0.30, 0.8, }, // 0.10 - SLP ILP { 0.00, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 0.80, 0.15, 0.10, 0.2, }, // 0.20 - DLP ILP SLP { 0.80, 0.15, 0.10, 0.3, }, // 0.25 - DLP { 0.80, 0.15, 0.10, 0.4, }, // 0.30 - DLP { 0.80, 0.15, 0.10, 0.5, }, // 0.35 - DLP { 0.80, 0.15, 0.10, 0.6, }, // 0.40 - DLP { 0.80, 0.15, 0.10, 0.7, }, // 0.45 { 0.80, 0.15, 0.10, 0.8, }, // 0.50 { 0.80, 0.15, 0.10, 0.9, }, // 0.55 { 0.85, 0.10, 0.15, 0.8, }, // 0.60 { 0.90, 0.10, 0.15, 0.7, }, // 0.65 { 0.95, 0.10, 0.10, 0.6, }, // 0.70 { 1.00, 0.10, 0.10, 0.5, }, // 0.75 { 1.00, 0.10, 0.10, 0.6, }, // 0.80 { 1.00, 0.10, 0.10, 0.7, }, // 0.85 { 1.00, 0.20, 0.20, 0.8, }, // 0.90 { 1.00, 0.30, 0.30, 0.9, }, // 0.95 { 1.00, 0.60, 0.00, 1.0, }, // 1.00 { 0.60, 0.40, 0.32, 1.0, }, }; #ifdef AVEP // bind array to 3D surface checkCudaErrors(hipBindSurfaceToArray(srfCol, d_volumeArray[ecmType], channelDesc)); // checkCudaErrors(hipBindSurfaceToArray(srfCol, 
d_volumeArray[ecmType])); #endif // set texture parameters texCol.normalized = true; // access with normalized texture coordinates texCol.filterMode = hipFilterModeLinear; // linear interpolation texCol.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texCol.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture checkCudaErrors(hipBindTextureToArray(texCol, d_volumeArray[ecmType], channelDesc)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayCol; checkCudaErrors(hipMallocArray(&d_transferFuncArrayCol, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayCol, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexCol.filterMode = hipFilterModeLinear; transferTexCol.normalized = true; // access with normalized texture coordinates transferTexCol.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexCol, d_transferFuncArrayCol, channelDesc2)); break; } case m_ela: { // Elastin float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, // 0.00 { 0.0, 1.0, 0.0, 0.5, }, // 0.05 - SLP ILP { 0.0, 0.0, 0.0, 0.0, }, // 0.10 - SLP ILP { 0.63, 0.12, 0.4, 0.3, }, // 0.15 - SLP ILP { 0.63, 0.12, 0.4, 0.4, }, // 0.20 - DLP ILP SLP { 0.63, 0.12, 0.4, 0.5, }, // 0.25 - DLP { 0.63, 0.12, 0.4, 0.6, }, // 0.30 - DLP { 0.63, 0.12, 0.4, 0.7, }, // 0.35 - DLP { 0.63, 0.12, 0.4, 0.8, }, // 0.40 - DLP { 0.63, 0.12, 0.4, 0.9, }, // 0.45 { 0.0, 1.0, 0.30, 1.0, }, // 0.50 { 0.0, 1.0, 0.30, 1.0, }, // 0.55 { 0.0, 1.0, 0.30, 1.0, }, // 0.60 { 0.0, 1.0, 0.30, 1.0, }, // 0.65 { 0.0, 1.0, 0.30, 1.0, }, // 0.70 { 0.0, 1.0, 0.30, 1.0, }, // 0.75 { 0.0, 0.0, 0.0, 0.0, }, // 0.80 { 0.0, 1.0, 0.30, 1.0, }, // 0.85 { 0.0, 1.0, 0.40, 0.5, }, // 0.90 { 0.0, 1.0, 0.50, 0.7, }, // 0.95 { 0.0, 1.0, 0.60, 1.0, }, // 1.00 }; // set texture parameters texEla.normalized = true; // access with normalized texture coordinates texEla.filterMode = hipFilterModeLinear; // linear interpolation texEla.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texEla.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(hipBindSurfaceToArray(srfEla, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(hipBindTextureToArray(texEla, d_volumeArray[ecmType], channelDesc)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayEla; checkCudaErrors(hipMallocArray(&d_transferFuncArrayEla, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayEla, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexEla.filterMode = hipFilterModeLinear; transferTexEla.normalized = true; // access with normalized texture coordinates transferTexEla.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexEla, d_transferFuncArrayEla, channelDesc2)); break; } case m_hya: { // Hyaluronan #ifdef RAT_VF float4 transferFunc[] = { { 0.0, 0.00, 0.00, 0.0, }, // 0.00 { 0.0, 0.00, 1.00, 0.5, }, // 0.05 - SLP ILP { 0.3, 0.30, 1.00, 0.8, }, // 0.10 - SLP ILP { 0.0, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 1.0, 0.43, 0.78, 0.2, }, // 0.20 - DLP ILP SLP { 1.0, 0.43, 0.78, 0.3, }, // 0.25 - DLP { 1.0, 0.43, 0.78, 0.4, }, // 0.30 - DLP { 1.0, 0.43, 0.78, 0.5, }, // 0.35 - DLP { 1.0, 
0.43, 0.78, 0.6, }, // 0.40 - DLP { 1.0, 0.43, 0.78, 0.7, }, // 0.45 { 1.0, 0.43, 0.78, 0.8, }, // 0.50 { 1.0, 0.43, 0.78, 0.9, }, // 0.55 { 0.8, 0.33, 0.85, 0.8, }, // 0.60 { 0.5, 0.23, 0.90, 0.7, }, // 0.65 { 0.3, 0.13, 0.95, 0.6, }, // 0.70 { 0.0, 0.00, 1.00, 0.5, }, // 0.75 { 0.1, 0.10, 1.00, 0.6, }, // 0.80 { 0.2, 0.20, 1.00, 0.7, }, // 0.85 { 0.3, 0.30, 1.00, 0.8, }, // 0.90 { 0.4, 0.40, 1.00, 0.9, }, // 0.95 { 0.7, 0.70, 1.00, 1.0, }, // 1.00 }; #else // RAT_VF float4 transferFunc[] = { { 0.0, 0.00, 0.00, 0.0, }, // 0.00 { 0.0, 0.00, 1.00, 0.5, }, // 0.05 - SLP ILP { 0.3, 0.30, 1.00, 0.8, }, // 0.10 - SLP ILP { 0.0, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 0.1, 0.43, 0.78, 0.2, }, // 0.20 - DLP ILP SLP { 0.2, 0.43, 0.78, 0.3, }, // 0.25 - DLP { 0.3, 0.43, 0.78, 0.4, }, // 0.30 - DLP { 0.4, 0.43, 0.78, 0.5, }, // 0.35 - DLP { 0.5, 0.43, 0.78, 0.6, }, // 0.40 - DLP { 0.6, 0.43, 0.78, 0.7, }, // 0.45 { 0.7, 0.43, 0.78, 0.8, }, // 0.50 { 0.7, 0.43, 0.78, 0.9, }, // 0.55 { 0.8, 0.33, 0.85, 0.8, }, // 0.60 { 0.5, 0.23, 0.90, 0.7, }, // 0.65 { 0.3, 0.13, 0.95, 0.6, }, // 0.70 { 0.0, 0.00, 1.00, 0.5, }, // 0.75 { 0.1, 0.10, 1.00, 0.6, }, // 0.80 { 0.2, 0.20, 1.00, 0.7, }, // 0.85 { 0.3, 0.30, 1.00, 0.8, }, // 0.90 { 0.4, 0.40, 1.00, 0.9, }, // 0.95 { 0.3, 0.10, 1.00, 1.0, }, // 1.00 { 0.0, 0.00, 1.00, 1.0, }, // 1.00 }; #endif // RAT_VF // set texture parameters texHya.normalized = true; // access with normalized texture coordinates texHya.filterMode = hipFilterModeLinear; // linear interpolation texHya.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texHya.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(hipBindSurfaceToArray(srfHya, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(hipBindTextureToArray(texHya, d_volumeArray[ecmType], channelDesc)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayHya; checkCudaErrors(hipMallocArray(&d_transferFuncArrayHya, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayHya, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexHya.filterMode = hipFilterModeLinear; transferTexHya.normalized = true; // access with normalized texture coordinates transferTexHya.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexHya, d_transferFuncArrayHya, channelDesc2)); break; } default: { // WEF // White bg float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 0.97, 0.8, 0.72, 1.0, }, { 0.97, 0.8, 0.72, 0.5, }, { 0.80, 0.6, 0.52, 0.7, }, { 0.60, 0.4, 0.32, 1.0, },//0.5, }, }; // // Black bg // float4 transferFunc[] = // { // { 0.0, 0.0, 0.0, 0.0, }, // { 1.0, 0.0, 0.0, 1.0, }, // { 1.0, 0.0, 0.0, 1.0, }, // { 0.97, 0.8, 0.72, 1.0, }, // { 0.97, 0.4, 0.30, 1.0, }, // { 0.97, 0.6, 0.50, 0.7, }, // { 0.97, 0.8, 0.72, 0.8, },//0.5, }, // }; // set texture parameters texCol.normalized = true; // access with normalized texture coordinates texCol.filterMode = hipFilterModeLinear; // linear interpolation texCol.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texCol.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(hipBindSurfaceToArray(srfCol, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(hipBindTextureToArray(texCol, d_volumeArray[ecmType], 
channelDesc)); hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArrayCol; checkCudaErrors(hipMallocArray(&d_transferFuncArrayCol, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(hipMemcpyToArray(d_transferFuncArrayCol, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTexCol.filterMode = hipFilterModeLinear; transferTexCol.normalized = true; // access with normalized texture coordinates transferTexCol.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(hipBindTextureToArray(transferTexCol, d_transferFuncArrayCol, channelDesc2)); break; } } } #ifdef AVEP extern "C" void initCudaAVEP( void *h_volume, hipExtent volumeSize, hipMemcpy3DParms &copyParams, hipMemcpy3DParms &svCopyParams, ecm_i ecmType) { initCuda(h_volume, volumeSize, copyParams, ecmType); #ifdef AVEP_INC // Allocate buffer device memory checkCudaErrors(hipMalloc(&(d_svBuffer[ecmType]), volumeSize.width*volumeSize.height*volumeSize.depth*sizeof(VolumeType))); svCopyParams.dstPtr = make_hipPitchedPtr( d_svBuffer[ecmType], volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); #else // AVEP_INC // Allocate buffer device memory checkCudaErrors(hipMalloc(&(d_svBuffer[ecmType]), SV_W*SV_H*SV_D*sizeof(VolumeType))); svCopyParams.dstPtr = make_hipPitchedPtr( d_svBuffer[ecmType], SV_W*sizeof(VolumeType), SV_W, SV_H); #endif // AVEP_INC // initialize copy params for sub-volumes svCopyParams.srcPtr = copyParams.srcPtr; svCopyParams.extent = make_hipExtent(SV_W*sizeof(VolumeType), SV_H, SV_D); svCopyParams.kind = hipMemcpyHostToDevice; } #endif extern "C" void freeCudaBuffers() { for(int ei = 0; ei < m_ecmtotal; ei++) { checkCudaErrors(hipFreeArray(d_volumeArray[ei])); } checkCudaErrors(hipFreeArray(d_transferFuncArrayCol)); checkCudaErrors(hipFreeArray(d_transferFuncArrayEla)); checkCudaErrors(hipFreeArray(d_transferFuncArrayHya)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void render_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int ecmChemType, bool isChem) { hipLaunchKernelGGL(( d_render_dim), dim3(gridSize), dim3(blockSize), 0, 0, d_output, nx, ny, nz, imageW, imageH, density, brightness, transferOffset, transferScale, ecmChemType, isChem); } extern "C" void render_sp_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int gpu_id) { hipLaunchKernelGGL(( d_render_sp_dim), dim3(gridSize), dim3(blockSize), 0, 0, d_output, nx, ny, nz, imageW, imageH, density, brightness, transferOffset, transferScale, gpu_id); } #ifdef ECV_SAMPLE_CHEM_TEST extern "C" void render_test_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int chemType, bool isHigh) { hipLaunchKernelGGL(( d_render_test_dim), dim3(gridSize), dim3(blockSize), 0, 0, d_output, nx, ny, nz, imageW, imageH, 
density, brightness, transferOffset, transferScale, chemType, isHigh); } #endif extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
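// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original kernel file): one
// possible way an application might drive the exported render_kernel_dim()
// wrapper defined above. The launchVolumeRender() helper, the 16x16 block
// shape and the divUpPixels() helper are hypothetical; only the wrapper's
// parameter list is taken from the code above, and d_output is assumed to be
// a device buffer of imageW*imageH uints (one packed RGBA pixel each).
static uint divUpPixels(uint a, uint b) { return (a + b - 1) / b; }

void launchVolumeRender(uint *d_output, uint nx, uint ny, uint nz,
                        uint imageW, uint imageH,
                        float density, float brightness,
                        float transferOffset, float transferScale,
                        int ecmChemType, bool isChem)
{
    // One thread per output pixel; the grid is rounded up so the whole image
    // is covered (the kernel itself bounds-checks x/y against imageW/imageH).
    dim3 blockSize(16, 16);
    dim3 gridSize(divUpPixels(imageW, blockSize.x), divUpPixels(imageH, blockSize.y));
    render_kernel_dim(gridSize, blockSize, d_output, nx, ny, nz, imageW, imageH,
                      density, brightness, transferOffset, transferScale,
                      ecmChemType, isChem);
}
// ---------------------------------------------------------------------------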
4ae99bdc56567145d7cd87d5b17d4c9327c34b43.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Modified by Nuttiiya Seekhao to support volume rendering of float value // from main memory // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <assert.h> #include <helper_cuda.h> #include <helper_math.h> #include "../../enums.h" // Note: Originally, including common_vis worked. However, it stopped working due to GLX inclusion. // TODO: Separate GLX includes and definitions #include "../../common.h" //#include "./common_vis.h" //#include "./VolumeManager.h" typedef unsigned int uint; typedef unsigned char uchar; //typedef unsigned char VolumeType; typedef float VolumeType; VolumeType *d_svBuffer[m_ecmtotal] = {0}; //cudaArray *d_svArray[m_ecmtotal] = {0}; cudaArray *d_volumeArray[m_ecmtotal] = {0}; #ifdef ECV_SAMPLE_CHEM cudaArray *d_chemsample_h[TOTAL_CHEM]; #endif cudaArray *d_transferFuncArrayCol = {0}; cudaArray *d_transferFuncArrayEla = {0}; cudaArray *d_transferFuncArrayHya = {0}; #ifdef AVEP surface<void, cudaSurfaceType3D> srfCol; surface<void, cudaSurfaceType3D> srfEla; surface<void, cudaSurfaceType3D> srfHya; #endif // AVEP texture<VolumeType, 3, cudaReadModeElementType> texCol; texture<VolumeType, 3, cudaReadModeElementType> texEla; texture<VolumeType, 3, cudaReadModeElementType> texHya; //texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, cudaReadModeElementType> transferTexCol; // 1D transfer function texture texture<float4, 1, cudaReadModeElementType> transferTexEla; texture<float4, 1, cudaReadModeElementType> transferTexHya; #ifdef ECV_SAMPLE_CHEM texture<VolumeType, 3, cudaReadModeElementType> texChem0; texture<VolumeType, 3, cudaReadModeElementType> texChem1; texture<VolumeType, 3, cudaReadModeElementType> texChem2; texture<VolumeType, 3, cudaReadModeElementType> texChem3; texture<VolumeType, 3, cudaReadModeElementType> texChem4; texture<VolumeType, 3, cudaReadModeElementType> texChem5; texture<VolumeType, 3, cudaReadModeElementType> texChem6; texture<VolumeType, 3, cudaReadModeElementType> texChem7; surface<void, cudaSurfaceType3D> srfChem0; surface<void, cudaSurfaceType3D> srfChem1; surface<void, cudaSurfaceType3D> srfChem2; surface<void, cudaSurfaceType3D> srfChem3; surface<void, cudaSurfaceType3D> srfChem4; surface<void, cudaSurfaceType3D> srfChem5; surface<void, cudaSurfaceType3D> srfChem6; surface<void, cudaSurfaceType3D> srfChem7; texture<float4, 1, cudaReadModeElementType> transferTexChem0; texture<float4, 1, cudaReadModeElementType> transferTexChem1; texture<float4, 1, cudaReadModeElementType> transferTexChem2; texture<float4, 1, cudaReadModeElementType> transferTexChem3; texture<float4, 1, cudaReadModeElementType> transferTexChem4; texture<float4, 1, cudaReadModeElementType> transferTexChem5; texture<float4, 1, cudaReadModeElementType> transferTexChem6; texture<float4, 1, cudaReadModeElementType> transferTexChem7; cudaArray *d_transferFuncArrayChem0 = {0}; cudaArray *d_transferFuncArrayChem1 = {0}; cudaArray *d_transferFuncArrayChem2 = {0}; cudaArray *d_transferFuncArrayChem3 = {0}; cudaArray *d_transferFuncArrayChem4 = {0}; cudaArray *d_transferFuncArrayChem5 = {0}; cudaArray 
*d_transferFuncArrayChem6 = {0}; cudaArray *d_transferFuncArrayChem7 = {0}; #endif // ECV_SAMPLE_CHEM typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm //Round a / b to nearest higher integer value int iDivUp_AVEP(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } #ifdef ECV_SAMPLE_CHEM #ifdef ECV_SAMPLE_CHEM_TEST __device__ float smult[TOTAL_CHEM] = { 50000.0f, 10000.0f, 1000000.0f, 100.0f, 10000.0f, 100000.0f, 100000.0f, 10000.0f }; // sample multiplier __global__ void sampleChem_kernel( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; const bool validZ = (0 <= z) && (z < dataD); const bool validY = (0 <= y) && (y < dataH); const bool validX = (0 <= x) && (x < dataW); const bool validZ_h = validZ && (z%ECV_SAMPLE_STRIDE_HGH == 0); const bool validY_h = validY && (y%ECV_SAMPLE_STRIDE_HGH == 0); const bool validX_h = validX && (x%ECV_SAMPLE_STRIDE_HGH == 0); const bool validZ_l = validZ && (z%ECV_SAMPLE_STRIDE_LOW == 0); const bool validY_l = validY && (y%ECV_SAMPLE_STRIDE_LOW == 0); const bool validX_l = validX && (x%ECV_SAMPLE_STRIDE_LOW == 0); const int sampleW_l = dataW / ECV_SAMPLE_STRIDE_LOW; const int sampleH_l = dataH / ECV_SAMPLE_STRIDE_LOW; const int sampleD_l = dataD / ECV_SAMPLE_STRIDE_LOW; const int sampleW_h = dataW / ECV_SAMPLE_STRIDE_HGH; const int sampleH_h = dataH / ECV_SAMPLE_STRIDE_HGH; const int sampleD_h = dataD / ECV_SAMPLE_STRIDE_HGH; int dx_l = x/ECV_SAMPLE_STRIDE_LOW; int dy_l = y/ECV_SAMPLE_STRIDE_LOW; int dz_l = z/ECV_SAMPLE_STRIDE_LOW; int dx_h = x/ECV_SAMPLE_STRIDE_HGH; int dy_h = y/ECV_SAMPLE_STRIDE_HGH; int dz_h = z/ECV_SAMPLE_STRIDE_HGH; const bool validDx_l = (dx_l < sampleW_l); const bool validDy_l = (dy_l < sampleH_l); const bool validDz_l = (dz_l < sampleD_l); const bool validDx_h = (dx_h < sampleW_h); const bool validDy_h = (dy_h < sampleH_h); const bool validDz_h = (dz_h < sampleD_h); if (validZ_h && validY_h && validX_h && validDx_h && validDy_h && validDz_h) { float sample = d_Src[z * dataH * dataW + y * dataW + x] * 50000000.0f;//*smult[chemIndex]; if (sample > 1.0f) sample = 1.0f; // if (sample < 0.01f) sample = 1.0f; surf3Dwrite(sample, srfChem0, dx_h * sizeof(float), dy_h, dz_h); } if (validZ_l && validY_l && validX_l && validDx_l && validDy_l && validDz_l) { float sample = d_Src[z * dataH * dataW + y * dataW + x] * 50000000.0f;//*smult[chemIndex]; if (sample > 1.0f) sample = 1.0f; // if (sample < 0.001f) sample = 1.0f; surf3Dwrite(sample, srfChem2, dx_l * sizeof(float), dy_l, dz_l); } } #else // ECV_SAMPLE_CHEM_TEST __global__ void sampleChem_kernel( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; const bool validZ = (0 <= z) && (z < dataD) && (z%ECV_SAMPLE_STRIDE == 0); const bool validY = (0 <= y) && (y < dataH) && (y%ECV_SAMPLE_STRIDE == 0); const bool validX = (0 <= x) && (x < dataW) && (x%ECV_SAMPLE_STRIDE == 0); // const bool validz = (z >= 0) && (z < datad); // const bool validy = (y >= 0) && (y < datah); // const bool validx = (x >= 0) && (x < dataw); const int 
sampleW = dataW / ECV_SAMPLE_STRIDE; const int sampleH = dataH / ECV_SAMPLE_STRIDE; const int sampleD = dataD / ECV_SAMPLE_STRIDE; int dx = x/ECV_SAMPLE_STRIDE; int dy = y/ECV_SAMPLE_STRIDE; int dz = z/ECV_SAMPLE_STRIDE; const bool validDx = (dx < sampleW); const bool validDy = (dy < sampleH); const bool validDz = (dz < sampleD); if (validZ && validY && validX && validDx && validDy && validDz) { float sample = d_Src[z * dataH * dataW + y * dataW + x]; switch (chemIndex) { case 0: sample *= 50000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem0, dx * sizeof(float), dy, dz); break; case 1: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem1, dx * sizeof(float), dy, dz); break; case 2: sample *= 1000000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem2, dx * sizeof(float), dy, dz); break; case 3: sample *= 100.0f;//5000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem3, dx * sizeof(float), dy, dz); break; case 4: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem4, dx * sizeof(float), dy, dz); break; case 5: sample *= 100000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem5, dx * sizeof(float), dy, dz); break; case 6: sample *= 100000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem6, dx * sizeof(float), dy, dz); break; case 7: sample *= 10000.0f; if (sample > 1.0f) sample = 1.0f; surf3Dwrite(sample, srfChem7, dx * sizeof(float), dy, dz); break; } } } #endif // ECV_SAMPLE_CHEM_TEST extern "C" void sampleChem( float *d_Src, int dataD, int dataH, int dataW, int chemIndex ) { dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(dataW, threads.x), iDivUp_AVEP(dataH, threads.y), iDivUp_AVEP(dataD, threads.z)); printf(" sampling chem [%dx%dx%d] ...\n", dataW, dataH, dataD); sampleChem_kernel<<<grid, threads>>>( d_Src, dataD, dataH, dataW, chemIndex ); getLastCudaError("sampleChem_kernel<<<>>> execution failed\n"); } #endif // ECV_SAMPLE_CHEM #ifdef AVEP #ifdef AVEP_INC __global__ void bufferToVolumeAVEP_round0_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, cudaPos offset, ecm_i ecmType) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float sample_dst = 0.0f; switch (ecmType) { case m_col: surf3Dread(&sample_dst, srfCol, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; case m_ela: surf3Dread(&sample_dst, srfEla, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; case m_hya: surf3Dread(&sample_dst, srfHya, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; default: surf3Dread(&sample_dst, srfCol, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; } // write diff to source d_Src[vz*volumeW*volumeH + vy*volumeW + vx] -= sample_dst; if (d_Src[vz*volumeW*volumeH + vy*volumeW + vx] == 0.0f) d_Src[vz*volumeW*volumeH + vy*volumeW + vx] = 0.4f; } } __global__ void bufferToVolumeAVEP_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, cudaPos offset, ecm_i ecmType, int incRound, float incFactor) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; 
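// Incremental write-back (AVEP_INC path): for each in-bounds voxel of the
// sub-volume, the code below reads the current value from the selected ECM 3D
// surface, adds ((incRound + 1) * incFactor) times the buffered difference
// stored in d_Src (prepared by the round-0 kernel above), and writes the
// result back to the same surface location.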
const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float multiplier = ((float) (incRound + 1))*incFactor; float sample = 0.0f; switch (ecmType) { case m_col: surf3Dread(&sample, srfCol, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz); break; case m_ela: surf3Dread(&sample, srfEla, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfEla, vx * sizeof(VolumeType), vy, vz); break; case m_hya: surf3Dread(&sample, srfHya, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfHya, vx * sizeof(VolumeType), vy, vz); break; default: surf3Dread(&sample, srfCol, vx * sizeof(VolumeType), vy, vz); sample += multiplier*d_Src[vz*volumeW*volumeH + vy*volumeW + vx]; surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz); break; } } } extern "C" void bufferToVolumeAVEP( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, cudaPos offset, ecm_i ecmType, int incRound, float incFactor) { assert(d_Src != d_Dst); assert(svW <= volumeW); assert(svH <= volumeH); assert(svD <= volumeD); dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(svW, threads.x), iDivUp_AVEP(svH, threads.y), iDivUp_AVEP(svD, threads.z)); if (!incRound) // round 0 { bufferToVolumeAVEP_round0_kernel<<<grid, threads>>>( d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType ); bufferToVolumeAVEP_kernel<<<grid, threads>>>( d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType, incRound, incFactor ); } else { bufferToVolumeAVEP_kernel<<<grid, threads>>>( d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType, incRound, incFactor ); } getLastCudaError("bufferToVolumeAVEP_kernel<<<>>> execution failed\n"); } #else // AVEP_INC __global__ void bufferToVolumeAVEP_kernel( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, cudaPos offset, ecm_i ecmType) { const int z = blockDim.z * blockIdx.z + threadIdx.z; const int y = blockDim.y * blockIdx.y + threadIdx.y; const int x = blockDim.x * blockIdx.x + threadIdx.x; bool isInBound = (0 <= x && x < svW) && (0 <= y && y < svH) && (0 <= z && z < svD); if (isInBound) { int vx = x + (offset.x)/sizeof(VolumeType); int vy = y + offset.y; int vz = z + offset.z; float sample = d_Src[z*SV_W*SV_H + y*SV_W + x]; // use buffer dimensions switch (ecmType) { case m_col: surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; case m_ela: surf3Dwrite(sample, srfEla, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; case m_hya: surf3Dwrite(sample, srfHya, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; default: surf3Dwrite(sample, srfCol, vx * sizeof(VolumeType), vy, vz);//, cudaBoundaryModeZero); break; } } } extern "C" void bufferToVolumeAVEP( float *d_Dst, float *d_Src, int svW, int svH, int svD, int volumeW, int volumeH, int volumeD, cudaPos offset, ecm_i ecmType) { assert(d_Src != d_Dst); assert(svW <= volumeW); assert(svH <= volumeH); assert(svD <= volumeD); dim3 threads(8, 8, 4); dim3 grid(iDivUp_AVEP(svW, threads.x), iDivUp_AVEP(svH, threads.y), iDivUp_AVEP(svD, threads.z)); 
bufferToVolumeAVEP_kernel<<<grid, threads>>>( d_Dst, d_Src, svW, svH, svD, volumeW, volumeH, volumeD, offset, ecmType ); getLastCudaError("bufferToVolumeAVEP_kernel<<<>>> execution failed\n"); } #endif // AVEP_INC #endif __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float sample = tex3D(texCol, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D(transferTexCol, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } #ifdef 
ECV_SAMPLE_CHEM_TEST __global__ void d_render_test_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int chemType, bool isHighRes) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); float sample_chem; float4 col_chem; if (isHighRes) { sample_chem = tex3D(texChem0, posx, posy, posz); } else { // low resolution sample_chem = tex3D(texChem2, posx, posy, posz); } // lookup in transfer function texture switch (chemType) { case 0: col_chem = tex1D(transferTexChem0, (sample_chem-transferOffset)*transferScale); break; case 1: col_chem = tex1D(transferTexChem1, (sample_chem-transferOffset)*transferScale); break; case 2: col_chem = tex1D(transferTexChem2, (sample_chem-transferOffset)*transferScale); break; case 3: col_chem = tex1D(transferTexChem3, (sample_chem-transferOffset)*transferScale); break; case 4: col_chem = tex1D(transferTexChem4, (sample_chem-transferOffset)*transferScale); break; case 5: col_chem = tex1D(transferTexChem5, (sample_chem-transferOffset)*transferScale); break; case 6: col_chem = tex1D(transferTexChem6, (sample_chem-transferOffset)*transferScale); break; case 7: col_chem = tex1D(transferTexChem7, (sample_chem-transferOffset)*transferScale); break; } col_chem.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col_chem.x *= col_chem.w; col_chem.y *= col_chem.w; col_chem.z *= col_chem.w; // "over" operator for front-to-back blending sum = sum + col_chem*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } #endif // ECV_SAMPLE_CHEM_TEST __global__ void d_render_sp_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, 
uint imageH, float density, float brightness, float transferOffset, float transferScale, int gpu_id) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); // const float x_halfwidth_chem = x_halfwidth/ECV_SAMPLE_STRIDE; // const float y_halfwidth_chem = y_halfwidth/ECV_SAMPLE_STRIDE; // const float z_halfwidth_chem = z_halfwidth/ECV_SAMPLE_STRIDE; const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); // const float3 boxMin_chem = make_float3(-1.0f*x_halfwidth_chem, -1.0f*y_halfwidth_chem, -1.0f*z_halfwidth_chem); // const float3 boxMax_chem = make_float3( 1.0f*x_halfwidth_chem, 1.0f*y_halfwidth_chem, 1.0f*z_halfwidth_chem); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction using largest dimension as reference #ifdef ECV_SEPARATE float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; #else // ECV_SEPARATE const float ray_ref = (float) max(imageW, imageH); float u = ((float) x / ray_ref)*2.0f - (imageW/ray_ref);//(float) imageW)*2.0f-1.0f; float v = ((float) y / ray_ref)*2.0f - (imageH/ray_ref);//(float) imageH)*2.0f-1.0f; #endif // ECV_SEPARATE // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); float posx_chem = posx;//(pos.x + x_halfwidth_chem)/(2.0f*x_halfwidth_chem); float posy_chem = posy;//(pos.y + y_halfwidth_chem)/(2.0f*y_halfwidth_chem); float posz_chem = posz;//(pos.z + z_halfwidth_chem)/(2.0f*z_halfwidth_chem); float sample_chem0, sample_chem1; float sample_chem2, sample_chem3; float4 col_chem; float4 col_chem0, col_chem1; float4 col_chem2, col_chem3; // float sample; // float4 col; // Assuming 2 GPUs // TODO: GPUs > 2 float blendFactor = 0.5f; if (gpu_id == 0) { sample_chem0 = 10.0f * tex3D(texChem0, posx_chem, posy_chem, posz_chem); sample_chem1 = 1.0f * tex3D(texChem2, posx_chem, posy_chem, posz_chem); sample_chem2 = 1.0f * tex3D(texChem4, posx_chem, posy_chem, posz_chem); sample_chem3 = 1.0f * tex3D(texChem6, posx_chem, posy_chem, posz_chem); // lookup in transfer function texture col_chem0 = tex1D(transferTexChem0, (sample_chem0-transferOffset)*transferScale); col_chem1 = tex1D(transferTexChem2, (sample_chem1-transferOffset)*transferScale); 
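// gpu_id == 0 samples the even-numbered chem channels (texChem0/2/4/6) and
// averages the four looked-up colors with equal 0.25 weights below; the else
// branch handles the odd-numbered channels (texChem1/3/5/7) with unit weights.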
col_chem2 = tex1D(transferTexChem4, (sample_chem2-transferOffset)*transferScale); col_chem3 = tex1D(transferTexChem6, (sample_chem3-transferOffset)*transferScale); // blend col_chem.x = 0.25f*col_chem0.x + 0.25f*col_chem1.x + 0.25f*col_chem2.x + 0.25f*col_chem3.x; col_chem.y = 0.25f*col_chem0.y + 0.25f*col_chem1.y + 0.25f*col_chem2.y + 0.25f*col_chem3.y; col_chem.z = 0.25f*col_chem0.z + 0.25f*col_chem1.z + 0.25f*col_chem2.z + 0.25f*col_chem3.z; col_chem.w = 0.25f*col_chem0.w + 0.25f*col_chem1.w + 0.25f*col_chem2.w + 0.25f*col_chem3.w; } else { sample_chem0 = 1.0f * tex3D(texChem1, posx_chem, posy_chem, posz_chem); sample_chem1 = 1.0f * tex3D(texChem3, posx_chem, posy_chem, posz_chem); sample_chem2 = 1.0f * tex3D(texChem5, posx_chem, posy_chem, posz_chem); sample_chem3 = 1.0f * tex3D(texChem7, posx_chem, posy_chem, posz_chem); // lookup in transfer function texture col_chem0 = tex1D(transferTexChem1, (sample_chem0-transferOffset)*transferScale); col_chem1 = tex1D(transferTexChem3, (sample_chem1-transferOffset)*transferScale); col_chem2 = tex1D(transferTexChem5, (sample_chem2-transferOffset)*transferScale); col_chem3 = tex1D(transferTexChem7, (sample_chem3-transferOffset)*transferScale); // blend col_chem.x = 1.0f*col_chem0.x + 1.0f*col_chem1.x + 1.0f*col_chem2.x + 1.0f*col_chem3.x; col_chem.y = 1.0f*col_chem0.y + 1.0f*col_chem1.y + 1.0f*col_chem2.y + 1.0f*col_chem3.y; col_chem.z = 1.0f*col_chem0.z + 1.0f*col_chem1.z + 1.0f*col_chem2.z + 1.0f*col_chem3.z; col_chem.w = 1.0f*col_chem0.w + 1.0f*col_chem1.w + 1.0f*col_chem2.w + 1.0f*col_chem3.w; } col_chem.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col_chem.x *= col_chem.w; col_chem.y *= col_chem.w; col_chem.z *= col_chem.w; // "over" operator for front-to-back blending sum = sum + col_chem*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } __global__ void d_render_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int ecmChemType, bool isChem) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; // Calculate box dimensions using largest dimension as reference const float a = -1.0f; const float b = +1.0f; const float ref = (float) max(nx, max(ny, nz)); const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a); const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a); const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a); const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth); const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; // Calculate ray vector direction using largest dimension as reference #ifdef ECV_SEPARATE float u = ((float) x / (float) imageW)*2.0f-1.0f; float v = ((float) y / (float) imageH)*2.0f-1.0f; #else // ECV_SEPARATE const float ray_ref = (float) max(imageW, imageH); float u = ((float) x / ray_ref)*2.0f - (imageW/ray_ref);//(float) imageW)*2.0f-1.0f; float v = ((float) y / ray_ref)*2.0f - (imageH/ray_ref);//(float) imageH)*2.0f-1.0f; #endif // ECV_SEPARATE // calculate eye ray in world space Ray eyeRay; eyeRay.o = 
make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth); float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth); float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth); #ifdef ECV_SAMPLE_CHEM #ifdef ECV_INTERLEAVE // float posx_chem = (pos.x + x_halfwidth_chem)/(2.0f*x_halfwidth_chem); // float posy_chem = (pos.y + y_halfwidth_chem)/(2.0f*y_halfwidth_chem); // float posz_chem = (pos.z + z_halfwidth_chem)/(2.0f*z_halfwidth_chem); float sample_chem0, sample_chem2; float sample_chem4, sample_chem6; float4 col_chem; float4 col_chem0, col_chem2; float4 col_chem4, col_chem6; #endif // ECV_INTERLEAVE #endif // ECV_SAMPLE_CHEM float sample; float4 col; if (isChem) { #ifdef ECV_SAMPLE_CHEM switch (ecmChemType) { case 0: sample = tex3D(texChem0, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem0, (sample-transferOffset)*transferScale); break; case 1: sample = tex3D(texChem1, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem1, (sample-transferOffset)*transferScale); break; case 2: sample = tex3D(texChem2, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem2, (sample-transferOffset)*transferScale); break; case 3: sample = tex3D(texChem3, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem3, (sample-transferOffset)*transferScale); break; case 4: sample = tex3D(texChem4, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem4, (sample-transferOffset)*transferScale); break; case 5: sample = tex3D(texChem5, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem5, (sample-transferOffset)*transferScale); break; case 6: sample = tex3D(texChem6, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem6, (sample-transferOffset)*transferScale); break; case 7: sample = tex3D(texChem7, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexChem7, (sample-transferOffset)*transferScale); break; } #endif // ECV_SAMPLE_CHEM } else { switch (ecmChemType) { case m_col: { sample = tex3D(texCol, posx, posy, posz); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture col = tex1D(transferTexCol, (sample-transferOffset)*transferScale); #ifdef ECV_SAMPLE_CHEM #ifdef ECV_INTERLEAVE float ecmf = 0.6f; float chmf = 1.0f-ecmf; sample_chem0 = 10.0f * tex3D(texChem0, posx, posy, posz); // sample_chem2 = 1.0f * tex3D(texChem2, posx_chem, posy_chem, posz_chem); // sample_chem4 = 1.0f * tex3D(texChem4, posx_chem, posy_chem, posz_chem); // sample_chem6 = 1.0f * tex3D(texChem6, posx_chem, posy_chem, posz_chem); // lookup in transfer function texture col_chem0 = tex1D(transferTexChem0, (sample_chem0-transferOffset)*transferScale); // col_chem2 = tex1D(transferTexChem2, (sample_chem2-transferOffset)*transferScale); // col_chem4 = tex1D(transferTexChem4, 
(sample_chem4-transferOffset)*transferScale); // col_chem6 = tex1D(transferTexChem6, (sample_chem6-transferOffset)*transferScale); // blend // col_chem.x = 0.25f*col_chem0.x + 0.25f*col_chem2.x + 0.25f*col_chem4.x + 0.25f*col_chem6.x; // col_chem.y = 0.25f*col_chem0.y + 0.25f*col_chem2.y + 0.25f*col_chem4.y + 0.25f*col_chem6.y; // col_chem.z = 0.25f*col_chem0.z + 0.25f*col_chem2.z + 0.25f*col_chem4.z + 0.25f*col_chem6.z; // col_chem.w = 0.25f*col_chem0.w + 0.25f*col_chem2.w + 0.25f*col_chem4.w + 0.25f*col_chem6.w; col_chem.x = 1.0f*col_chem0.x;// + 1.0f*col_chem2.x + 1.0f*col_chem4.x + 1.0f*col_chem6.x; col_chem.y = 1.0f*col_chem0.y;// + 1.0f*col_chem2.y + 1.0f*col_chem4.y + 1.0f*col_chem6.y; col_chem.z = 1.0f*col_chem0.z;// + 1.0f*col_chem2.z + 1.0f*col_chem4.z + 1.0f*col_chem6.z; col_chem.w = 1.0f*col_chem0.w;// + 1.0f*col_chem2.w + 1.0f*col_chem4.w + 1.0f*col_chem6.w; // col.x = col_chem.x < 0.1f? chmf*col_chem.x + ecmf*col.x : chmf*col_chem.x + 1.0f*col.x; // col.y = col_chem.y < 0.1f? chmf*col_chem.y + ecmf*col.y : chmf*col_chem.y + 1.0f*col.y; // col.z = col_chem.z < 0.1f? chmf*col_chem.z + ecmf*col.z : chmf*col_chem.z + 1.0f*col.z; // col.w = col_chem.w < 0.1f? chmf*col_chem.w + ecmf*col.w : chmf*col_chem.w + 1.0f*col.w; col.x = chmf*col_chem.x + 1.0f*col.x; col.y = chmf*col_chem.y + 1.0f*col.y; col.z = chmf*col_chem.z + 1.0f*col.z; col.w = chmf*col_chem.w + 1.0f*col.w; #endif // ECV_INTERLEAVE #endif // ECV_SAMPLE_CHEM break; } case m_ela: sample = tex3D(texEla, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexEla, (sample-transferOffset)*transferScale); break; case m_hya: sample = tex3D(texHya, posx, posy, posz); // lookup in transfer function texture col = tex1D(transferTexHya, (sample-transferOffset)*transferScale); break; } } col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { texCol.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; texEla.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; texHya.filterMode = bLinearFilter ? 
cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void printCpyParams(cudaMemcpy3DParms cp){ /** * struct cudaArray *srcArray; struct cudaPos srcPos; struct cudaPitchedPtr srcPtr; struct cudaArray *dstArray; struct cudaPos dstPos; struct cudaPitchedPtr dstPtr; struct cudaExtent extent; enum cudaMemcpyKind kind; */ printf("copy params:\n"); printf("\tsrcArray: %p\n", cp.srcArray); printf("\tsrcPos: %d, %d, %d\n", cp.srcPos.x, cp.srcPos.y, cp.srcPos.z); printf("\tsrcPtr:\n"); // if(cp.srcPtr != 0) // { printf("\t\tpitch: %d\n", cp.srcPtr.pitch); printf("\t\tptr: %p\n", cp.srcPtr.ptr); printf("\t\txsize: %d\n", cp.srcPtr.xsize); printf("\t\tysize: %d\n", cp.srcPtr.ysize); // } printf("\tdstArray: %p\n", cp.dstArray); printf("\tdstPos: %d, %d, %d\n", cp.dstPos.x, cp.dstPos.y, cp.dstPos.z); printf("\tdstPtr:\n"); // if(cp.dstPtr != 0) // { printf("\t\tpitch: %d\n", cp.dstPtr.pitch); printf("\t\tptr: %p\n", cp.dstPtr.ptr); printf("\t\txsize: %d\n", cp.dstPtr.xsize); printf("\t\tysize: %d\n", cp.dstPtr.ysize); // } printf("\textent: %d, %d, %d\n", cp.extent.width, cp.extent.height, cp.extent.depth); } #ifdef AVEP #ifdef AVEP_INC extern "C" void bufferECMmapAVEP( cudaMemcpy3DParms copyParams, cudaMemcpy3DParms svCopyParams, ecm_i ecmType, int incRound, float incFactor) { if (!incRound) // first round { // copy a subvolume from host into device buffer printf("\t\tcopying...\n"); checkCudaErrors(cudaMemcpy3D(&svCopyParams)); printf("\t\tdone copying\n"); } // copy data from device buffer to device volume array printf("\t\tbuffering...\n"); VolumeType *d_Src = (VolumeType *) svCopyParams.dstPtr.ptr; VolumeType *d_Dst = (VolumeType *) copyParams.dstArray; bufferToVolumeAVEP( d_Dst, d_Src, (svCopyParams.extent.width)/sizeof(VolumeType), svCopyParams.extent.height, svCopyParams.extent.depth, copyParams.extent.width, copyParams.extent.height, copyParams.extent.depth, svCopyParams.srcPos, ecmType, incRound, incFactor); printf("\t\tdone buffering\n"); } #else // AVEP_INC extern "C" void bufferECMmapAVEP( cudaMemcpy3DParms copyParams, cudaMemcpy3DParms svCopyParams, ecm_i ecmType) { // copy a subvolume from host into device buffer printf("\t\tcopying...\n"); checkCudaErrors(cudaMemcpy3D(&svCopyParams)); printf("\t\tdone copying\n"); // copy data from device buffer to device volume array printf("\t\tbuffering...\n"); VolumeType *d_Src = (VolumeType *) svCopyParams.dstPtr.ptr; VolumeType *d_Dst = (VolumeType *) copyParams.dstArray; bufferToVolumeAVEP( d_Dst, d_Src, (svCopyParams.extent.width)/sizeof(VolumeType), svCopyParams.extent.height, svCopyParams.extent.depth, copyParams.extent.width, copyParams.extent.height, copyParams.extent.depth, svCopyParams.srcPos, ecmType); printf("\t\tdone buffering\n"); } #endif // AVEP_INC #endif // AVEP #ifdef ECV_SAMPLE_CHEM #ifdef ECV_SAMPLE_CHEM_TEST // gets called in DiffusionHelper.cpp extern "C" void initCudaChemSample(cudaExtent volumeSize, int chemIndex) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); checkCudaErrors(cudaMalloc3DArray(&(d_chemsample_h[chemIndex]), &channelDesc, volumeSize, cudaArraySurfaceLoadStore)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); switch (chemIndex) { case 0: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = cudaFilterModeLinear; // linear interpolation 
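// Per-channel setup pattern (same shape in every chemIndex case): bind the
// 3D cudaArray to a surface (srfChemN) and to a 3D texture (texChemN) used
// by the ray-marching kernels, then upload a small float4 table and bind it
// as the 1D transfer-function texture (transferTexChemN).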
texChem0.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem0.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); // TNF: float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; cudaArray *d_transferFuncArrayChem0; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem0.filterMode = cudaFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } case 2: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem2, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem2.normalized = true; // access with normalized texture coordinates texChem2.filterMode = cudaFilterModeLinear; // linear interpolation texChem2.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem2.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem2, d_chemsample_h[chemIndex], channelDesc)); // TGF: Purple-Turquoise float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.623520, 0.372549, 0.623529, 0.30, }, // 0.10 // purple // { 1.00, 0.32, 0.18, 0.30, }, // bright orange { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 1.00, 0.00, 0.60, }, // 0.60 // yellow { 0.196078, 0.60, 0.80, 0.80, }, // 0.80 // sky blue { 0.439216, 0.858824, 0.576471, 1.00, }, // Turquoise }; cudaArray *d_transferFuncArrayChem2; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem2, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem2, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem2.filterMode = cudaFilterModeLinear; transferTexChem2.normalized = true; // access with normalized texture coordinates transferTexChem2.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem2, d_transferFuncArrayChem2, channelDesc2)); break; } default: { printf("Chem Resolution Comparison: Wrong buffer index %d\n", chemIndex); exit(-1); } } } #else // ECV_SAMPLE_CHEM_TEST // gets called in DiffusionHelper.cpp extern "C" void initCudaChemSample(cudaExtent volumeSize, int chemIndex) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); checkCudaErrors(cudaMalloc3DArray(&(d_chemsample_h[chemIndex]), &channelDesc, volumeSize, cudaArraySurfaceLoadStore)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); switch (chemIndex) { case 0: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = cudaFilterModeLinear; // linear 
interpolation texChem0.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem0.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); // TNF: float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; cudaArray *d_transferFuncArrayChem0; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem0.filterMode = cudaFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } case 1: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem1, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem1.normalized = true; // access with normalized texture coordinates texChem1.filterMode = cudaFilterModeLinear; // linear interpolation texChem1.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem1.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem1, d_chemsample_h[chemIndex], channelDesc)); // TGF: Purple-Turquoise float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.623520, 0.372549, 0.623529, 0.30, }, // 0.10 // purple // { 1.00, 0.32, 0.18, 0.30, }, // bright orange { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 1.00, 0.00, 0.60, }, // 0.60 // yellow { 0.196078, 0.60, 0.80, 0.80, }, // 0.80 // sky blue { 0.439216, 0.858824, 0.576471, 1.00, }, // Turquoise }; cudaArray *d_transferFuncArrayChem1; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem1, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem1, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem1.filterMode = cudaFilterModeLinear; transferTexChem1.normalized = true; // access with normalized texture coordinates transferTexChem1.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem1, d_transferFuncArrayChem1, channelDesc2)); break; } case 2: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem2, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem2.normalized = true; // access with normalized texture coordinates texChem2.filterMode = cudaFilterModeLinear; // linear interpolation texChem2.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem2.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem2, d_chemsample_h[chemIndex], channelDesc)); // FGF: Brown float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.647059, 0.164706, 0.164706, 0.05, }, // 0.05 { 0.647059, 0.164706, 0.164706, 0.10, }, // 0.10 { 0.647059, 0.164706, 0.164706, 0.15, }, // 0.15 { 0.647059, 0.164706, 0.164706, 0.20, 
}, // 0.20 { 0.647059, 0.164706, 0.164706, 0.25, }, // 0.25 { 0.647059, 0.164706, 0.164706, 0.30, }, // 0.30 { 0.647059, 0.164706, 0.164706, 0.35, }, // 0.35 { 0.647059, 0.164706, 0.164706, 0.40, }, // 0.40 { 0.647059, 0.164706, 0.164706, 0.45, }, // 0.45 { 0.647059, 0.164706, 0.164706, 0.50, }, // 0.50 { 0.647059, 0.164706, 0.164706, 0.55, }, // 0.55 { 0.647059, 0.164706, 0.164706, 0.60, }, // 0.60 { 0.647059, 0.164706, 0.164706, 0.65, }, // 0.65 { 0.647059, 0.164706, 0.164706, 0.70, }, // 0.70 { 0.647059, 0.164706, 0.164706, 0.75, }, // 0.75 { 0.647059, 0.164706, 0.164706, 0.80, }, // 0.80 { 0.647059, 0.164706, 0.164706, 0.85, }, // 0.85 { 0.647059, 0.164706, 0.164706, 0.90, }, // 0.90 { 0.647059, 0.164706, 0.164706, 0.95, }, // 0.95 { 0.647059, 0.164706, 0.164706, 1.00, }, // 1.00 }; cudaArray *d_transferFuncArrayChem2; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem2, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem2, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem2.filterMode = cudaFilterModeLinear; transferTexChem2.normalized = true; // access with normalized texture coordinates transferTexChem2.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem2, d_transferFuncArrayChem2, channelDesc2)); break; } case 3: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem3, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem3.normalized = true; // access with normalized texture coordinates texChem3.filterMode = cudaFilterModeLinear; // linear interpolation texChem3.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem3.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem3, d_chemsample_h[chemIndex], channelDesc)); // MMP8: Sky blue float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.196078, 0.600000, 0.800000, 0.05, }, // 0.05 { 0.196078, 0.600000, 0.800000, 0.10, }, // 0.10 { 0.196078, 0.600000, 0.800000, 0.15, }, // 0.15 { 0.196078, 0.600000, 0.800000, 0.20, }, // 0.20 { 0.196078, 0.600000, 0.800000, 0.25, }, // 0.25 { 0.196078, 0.600000, 0.800000, 0.30, }, // 0.30 { 0.196078, 0.600000, 0.800000, 0.35, }, // 0.35 { 0.196078, 0.600000, 0.800000, 0.40, }, // 0.40 { 0.196078, 0.600000, 0.800000, 0.45, }, // 0.45 { 0.196078, 0.600000, 0.800000, 0.50, }, // 0.50 { 0.196078, 0.600000, 0.800000, 0.55, }, // 0.55 { 0.196078, 0.600000, 0.800000, 0.60, }, // 0.60 { 0.196078, 0.600000, 0.800000, 0.65, }, // 0.65 { 0.196078, 0.600000, 0.800000, 0.70, }, // 0.70 { 0.196078, 0.600000, 0.800000, 0.75, }, // 0.75 { 0.196078, 0.600000, 0.800000, 0.80, }, // 0.80 { 0.196078, 0.600000, 0.800000, 0.85, }, // 0.85 { 0.196078, 0.600000, 0.800000, 0.90, }, // 0.90 { 0.196078, 0.600000, 0.800000, 0.95, }, // 0.95 { 0.196078, 0.600000, 0.800000, 1.00, }, // 1.00 }; cudaArray *d_transferFuncArrayChem; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem3, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem3, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem3.filterMode = cudaFilterModeLinear; transferTexChem3.normalized = true; // access with normalized texture coordinates transferTexChem3.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates 
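// (The address mode set above is clamp, despite the "wrap" wording in the
// trailing comment; out-of-range transfer-function lookups clamp to the
// edge entries of the table.)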
// Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem3, d_transferFuncArrayChem3, channelDesc2)); break; } case 4: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem4, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem4.normalized = true; // access with normalized texture coordinates texChem4.filterMode = cudaFilterModeLinear; // linear interpolation texChem4.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem4.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem4, d_chemsample_h[chemIndex], channelDesc)); // IL1: Green Beach from https://digitalsynopsis.com/design/beautiful-color-ui-gradients-backgrounds/ float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.01, 0.67, 0.69, 0.50, }, // blue-mild green { 0.00, 0.80, 0.67, 1.00, }, // light blue-mild green }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayChem4; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem4, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem4, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem4.filterMode = cudaFilterModeLinear; transferTexChem4.normalized = true; // access with normalized texture coordinates transferTexChem4.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem4, d_transferFuncArrayChem4, channelDesc2)); break; } case 5: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem5, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem5.normalized = true; // access with normalized texture coordinates texChem5.filterMode = cudaFilterModeLinear; // linear interpolation texChem5.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem5.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem5, d_chemsample_h[chemIndex], channelDesc)); // IL6: Pink float4 transferFunc[] = { { 0.000000, 0.000000, 0.000000, 0.00, }, // 0.00 { 0.737255, 0.560784, 0.560784, 0.05, }, // 0.05 { 0.737255, 0.560784, 0.560784, 0.10, }, // 0.10 { 0.737255, 0.560784, 0.560784, 0.15, }, // 0.15 { 0.737255, 0.560784, 0.560784, 0.20, }, // 0.20 { 0.737255, 0.560784, 0.560784, 0.25, }, // 0.25 { 0.737255, 0.560784, 0.560784, 0.30, }, // 0.30 { 0.737255, 0.560784, 0.560784, 0.35, }, // 0.35 { 0.737255, 0.560784, 0.560784, 0.40, }, // 0.40 { 0.737255, 0.560784, 0.560784, 0.45, }, // 0.45 { 0.737255, 0.560784, 0.560784, 0.50, }, // 0.50 { 0.737255, 0.560784, 0.560784, 0.55, }, // 0.55 { 0.737255, 0.560784, 0.560784, 0.60, }, // 0.60 { 0.737255, 0.560784, 0.560784, 0.65, }, // 0.65 { 0.737255, 0.560784, 0.560784, 0.70, }, // 0.70 { 0.737255, 0.560784, 0.560784, 0.75, }, // 0.75 { 0.737255, 0.560784, 0.560784, 0.80, }, // 0.80 { 0.737255, 0.560784, 0.560784, 0.85, }, // 0.85 { 0.737255, 0.560784, 0.560784, 0.90, }, // 0.90 { 0.737255, 0.560784, 0.560784, 0.95, }, // 0.95 { 0.737255, 0.560784, 0.560784, 1.00, }, // 1.00 }; cudaArray *d_transferFuncArrayChem5; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem5, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem5, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); 
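// sizeof(transferFunc)/sizeof(float4) gives the LUT width in texels (21
// entries for this IL6 table); the 1D lookup texture is configured and
// bound next.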
transferTexChem5.filterMode = cudaFilterModeLinear; transferTexChem5.normalized = true; // access with normalized texture coordinates transferTexChem5.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem5, d_transferFuncArrayChem5, channelDesc2)); break; } case 6: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem6, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem6.normalized = true; // access with normalized texture coordinates texChem6.filterMode = cudaFilterModeLinear; // linear interpolation texChem6.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem6.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); // IL8: Orange-Yellow float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.23, 0.11, 0.44, 0.30, }, // purple { 0.84, 0.42, 0.47, 0.40, }, // salmon pink { 1.00, 0.69, 0.48, 0.80, }, // mild orange { 1.00, 0.79, 0.58, 1.00, }, // light mild orange }; cudaArray *d_transferFuncArrayChem6; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem6, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem6, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem6.filterMode = cudaFilterModeLinear; transferTexChem6.normalized = true; // access with normalized texture coordinates transferTexChem6.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem6, d_transferFuncArrayChem6, channelDesc2)); break; } case 7: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem7, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem7.normalized = true; // access with normalized texture coordinates texChem7.filterMode = cudaFilterModeLinear; // linear interpolation texChem7.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem7.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem7, d_chemsample_h[chemIndex], channelDesc)); // IL10: White float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 0.26, 0.81, 0.64, 0.30, }, // 0.10 // green { 0.60, 0.80, 0.196078, 0.50, }, // 0.20 // yellow-green { 1.00, 0.11, 0.68, 0.60, }, // 0.60 { 0.678431, 0.917647, 0.917647, 0.80, }, // 0.80 { 0.00, 0.00, 1.00, 1.00, }, // 1.00 }; cudaArray *d_transferFuncArrayChem7; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem7, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem7, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem7.filterMode = cudaFilterModeLinear; transferTexChem7.normalized = true; // access with normalized texture coordinates transferTexChem7.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem7, d_transferFuncArrayChem7, channelDesc2)); break; } default: { // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfChem0, d_chemsample_h[chemIndex], channelDesc)); // set texture parameters texChem0.normalized = true; // access with normalized texture coordinates texChem0.filterMode = 
cudaFilterModeLinear; // linear interpolation texChem0.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texChem0.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texChem0, d_chemsample_h[chemIndex], channelDesc)); float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.00, }, // 0.00 { 1.00, 0.00, 1.00, 0.05, }, // 0.05 { 1.00, 0.05, 0.90, 0.10, }, // 0.10 { 0.80, 0.10, 0.80, 0.15, }, // 0.15 { 0.60, 0.15, 0.70, 0.20, }, // 0.20 { 0.40, 0.20, 0.60, 0.25, }, // 0.25 { 0.20, 0.25, 0.50, 0.30, }, // 0.30 { 0.00, 0.30, 0.40, 0.35, }, // 0.35 { 0.40, 0.35, 0.30, 0.40, }, // 0.40 { 0.60, 0.40, 0.20, 0.45, }, // 0.45 { 0.70, 0.45, 0.10, 0.50, }, // 0.50 { 0.80, 0.45, 0.00, 0.55, }, // 0.55 { 0.90, 0.50, 0.00, 0.60, }, // 0.60 { 1.00, 0.50, 0.00, 0.65, }, // 0.65 { 1.00, 0.50, 0.00, 0.70, }, // 0.70 { 1.00, 0.50, 0.00, 0.75, }, // 0.75 { 1.00, 0.50, 0.00, 0.80, }, // 0.80 { 1.00, 0.50, 0.00, 0.85, }, // 0.85 { 1.00, 0.50, 0.00, 0.90, }, // 0.90 { 1.00, 0.50, 0.00, 0.95, }, // 0.95 { 1.00, 0.50, 0.00, 1.00, }, // 1.00 }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayChem0; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayChem0, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayChem0, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexChem0.filterMode = cudaFilterModeLinear; transferTexChem0.normalized = true; // access with normalized texture coordinates transferTexChem0.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexChem0, d_transferFuncArrayChem0, channelDesc2)); break; } } } #endif // ECV_SAMPLE_CHEM_TEST #endif // ECV_SAMPLE_CHEM extern "C" void bufferECMmap(cudaMemcpy3DParms copyParams) { checkCudaErrors(cudaMemcpy3D(&copyParams)); } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize, cudaMemcpy3DParms &copyParams, ecm_i ecmType) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); checkCudaErrors(cudaMalloc3DArray(&(d_volumeArray[ecmType]), &channelDesc, volumeSize, cudaArraySurfaceLoadStore)); // copy data to 3D array copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray[ecmType]; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); //create transfer function texture switch(ecmType) { case m_col: { // Collagen float4 transferFunc[] = { { 0.00, 0.00, 0.00, 0.0, }, // 0.00 { 1.00, 0.00, 0.00, 0.5, }, // 0.05 - SLP ILP { 1.00, 0.30, 0.30, 0.8, }, // 0.10 - SLP ILP { 0.00, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 0.80, 0.15, 0.10, 0.2, }, // 0.20 - DLP ILP SLP { 0.80, 0.15, 0.10, 0.3, }, // 0.25 - DLP { 0.80, 0.15, 0.10, 0.4, }, // 0.30 - DLP { 0.80, 0.15, 0.10, 0.5, }, // 0.35 - DLP { 0.80, 0.15, 0.10, 0.6, }, // 0.40 - DLP { 0.80, 0.15, 0.10, 0.7, }, // 0.45 { 0.80, 0.15, 0.10, 0.8, }, // 0.50 { 0.80, 0.15, 0.10, 0.9, }, // 0.55 { 0.85, 0.10, 0.15, 0.8, }, // 0.60 { 0.90, 0.10, 0.15, 0.7, }, // 0.65 { 0.95, 0.10, 0.10, 0.6, }, // 0.70 { 1.00, 0.10, 0.10, 0.5, }, // 0.75 { 1.00, 0.10, 0.10, 0.6, }, // 0.80 { 1.00, 0.10, 0.10, 0.7, }, // 0.85 { 1.00, 0.20, 0.20, 0.8, }, // 0.90 { 1.00, 0.30, 0.30, 0.9, }, // 0.95 { 1.00, 0.60, 0.00, 1.0, }, // 1.00 { 0.60, 0.40, 
0.32, 1.0, }, }; #ifdef AVEP // bind array to 3D surface checkCudaErrors(cudaBindSurfaceToArray(srfCol, d_volumeArray[ecmType], channelDesc)); // checkCudaErrors(cudaBindSurfaceToArray(srfCol, d_volumeArray[ecmType])); #endif // set texture parameters texCol.normalized = true; // access with normalized texture coordinates texCol.filterMode = cudaFilterModeLinear; // linear interpolation texCol.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texCol.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture checkCudaErrors(cudaBindTextureToArray(texCol, d_volumeArray[ecmType], channelDesc)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayCol; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayCol, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayCol, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexCol.filterMode = cudaFilterModeLinear; transferTexCol.normalized = true; // access with normalized texture coordinates transferTexCol.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexCol, d_transferFuncArrayCol, channelDesc2)); break; } case m_ela: { / // Elastin float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, // 0.00 { 0.0, 1.0, 0.0, 0.5, }, // 0.05 - SLP ILP { 0.0, 0.0, 0.0, 0.0, }, // 0.10 - SLP ILP { 0.63, 0.12, 0.4, 0.3, }, // 0.15 - SLP ILP { 0.63, 0.12, 0.4, 0.4, }, // 0.20 - DLP ILP SLP { 0.63, 0.12, 0.4, 0.5, }, // 0.25 - DLP { 0.63, 0.12, 0.4, 0.6, }, // 0.30 - DLP { 0.63, 0.12, 0.4, 0.7, }, // 0.35 - DLP { 0.63, 0.12, 0.4, 0.8, }, // 0.40 - DLP { 0.63, 0.12, 0.4, 0.9, }, // 0.45 { 0.0, 1.0, 0.30, 1.0, }, // 0.50 { 0.0, 1.0, 0.30, 1.0, }, // 0.55 { 0.0, 1.0, 0.30, 1.0, }, // 0.60 { 0.0, 1.0, 0.30, 1.0, }, // 0.65 { 0.0, 1.0, 0.30, 1.0, }, // 0.70 { 0.0, 1.0, 0.30, 1.0, }, // 0.75 { 0.0, 0.0, 0.0, 0.0, }, // 0.80 { 0.0, 1.0, 0.30, 1.0, }, // 0.85 { 0.0, 1.0, 0.40, 0.5, }, // 0.90 { 0.0, 1.0, 0.50, 0.7, }, // 0.95 { 0.0, 1.0, 0.60, 1.0, }, // 1.00 }; // set texture parameters texEla.normalized = true; // access with normalized texture coordinates texEla.filterMode = cudaFilterModeLinear; // linear interpolation texEla.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texEla.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(cudaBindSurfaceToArray(srfEla, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(cudaBindTextureToArray(texEla, d_volumeArray[ecmType], channelDesc)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayEla; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayEla, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayEla, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexEla.filterMode = cudaFilterModeLinear; transferTexEla.normalized = true; // access with normalized texture coordinates transferTexEla.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexEla, d_transferFuncArrayEla, channelDesc2)); break; } case m_hya: { // Hyaluronan #ifdef RAT_VF float4 transferFunc[] = { { 0.0, 0.00, 0.00, 0.0, }, // 0.00 { 0.0, 0.00, 1.00, 0.5, }, // 0.05 - SLP ILP { 0.3, 0.30, 1.00, 0.8, }, // 0.10 - SLP ILP { 
0.0, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 1.0, 0.43, 0.78, 0.2, }, // 0.20 - DLP ILP SLP { 1.0, 0.43, 0.78, 0.3, }, // 0.25 - DLP { 1.0, 0.43, 0.78, 0.4, }, // 0.30 - DLP { 1.0, 0.43, 0.78, 0.5, }, // 0.35 - DLP { 1.0, 0.43, 0.78, 0.6, }, // 0.40 - DLP { 1.0, 0.43, 0.78, 0.7, }, // 0.45 { 1.0, 0.43, 0.78, 0.8, }, // 0.50 { 1.0, 0.43, 0.78, 0.9, }, // 0.55 { 0.8, 0.33, 0.85, 0.8, }, // 0.60 { 0.5, 0.23, 0.90, 0.7, }, // 0.65 { 0.3, 0.13, 0.95, 0.6, }, // 0.70 { 0.0, 0.00, 1.00, 0.5, }, // 0.75 { 0.1, 0.10, 1.00, 0.6, }, // 0.80 { 0.2, 0.20, 1.00, 0.7, }, // 0.85 { 0.3, 0.30, 1.00, 0.8, }, // 0.90 { 0.4, 0.40, 1.00, 0.9, }, // 0.95 { 0.7, 0.70, 1.00, 1.0, }, // 1.00 }; #else // RAT_VF float4 transferFunc[] = { { 0.0, 0.00, 0.00, 0.0, }, // 0.00 { 0.0, 0.00, 1.00, 0.5, }, // 0.05 - SLP ILP { 0.3, 0.30, 1.00, 0.8, }, // 0.10 - SLP ILP { 0.0, 0.00, 0.00, 0.0, }, // 0.15 - SLP ILP { 0.1, 0.43, 0.78, 0.2, }, // 0.20 - DLP ILP SLP { 0.2, 0.43, 0.78, 0.3, }, // 0.25 - DLP { 0.3, 0.43, 0.78, 0.4, }, // 0.30 - DLP { 0.4, 0.43, 0.78, 0.5, }, // 0.35 - DLP { 0.5, 0.43, 0.78, 0.6, }, // 0.40 - DLP { 0.6, 0.43, 0.78, 0.7, }, // 0.45 { 0.7, 0.43, 0.78, 0.8, }, // 0.50 { 0.7, 0.43, 0.78, 0.9, }, // 0.55 { 0.8, 0.33, 0.85, 0.8, }, // 0.60 { 0.5, 0.23, 0.90, 0.7, }, // 0.65 { 0.3, 0.13, 0.95, 0.6, }, // 0.70 { 0.0, 0.00, 1.00, 0.5, }, // 0.75 { 0.1, 0.10, 1.00, 0.6, }, // 0.80 { 0.2, 0.20, 1.00, 0.7, }, // 0.85 { 0.3, 0.30, 1.00, 0.8, }, // 0.90 { 0.4, 0.40, 1.00, 0.9, }, // 0.95 { 0.3, 0.10, 1.00, 1.0, }, // 1.00 { 0.0, 0.00, 1.00, 1.0, }, // 1.00 }; #endif // RAT_VF // set texture parameters texHya.normalized = true; // access with normalized texture coordinates texHya.filterMode = cudaFilterModeLinear; // linear interpolation texHya.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texHya.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(cudaBindSurfaceToArray(srfHya, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(cudaBindTextureToArray(texHya, d_volumeArray[ecmType], channelDesc)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayHya; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayHya, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayHya, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexHya.filterMode = cudaFilterModeLinear; transferTexHya.normalized = true; // access with normalized texture coordinates transferTexHya.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexHya, d_transferFuncArrayHya, channelDesc2)); break; } default: { // WEF // White bg float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 0.97, 0.8, 0.72, 1.0, }, { 0.97, 0.8, 0.72, 0.5, }, { 0.80, 0.6, 0.52, 0.7, }, { 0.60, 0.4, 0.32, 1.0, },//0.5, }, }; // // Black bg // float4 transferFunc[] = // { // { 0.0, 0.0, 0.0, 0.0, }, // { 1.0, 0.0, 0.0, 1.0, }, // { 1.0, 0.0, 0.0, 1.0, }, // { 0.97, 0.8, 0.72, 1.0, }, // { 0.97, 0.4, 0.30, 1.0, }, // { 0.97, 0.6, 0.50, 0.7, }, // { 0.97, 0.8, 0.72, 0.8, },//0.5, }, // }; // set texture parameters texCol.normalized = true; // access with normalized texture coordinates texCol.filterMode = cudaFilterModeLinear; // linear interpolation texCol.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texCol.addressMode[1] 
= cudaAddressModeClamp; // bind array to 3D texture #ifdef AVEP checkCudaErrors(cudaBindSurfaceToArray(srfCol, d_volumeArray[ecmType], channelDesc)); #endif checkCudaErrors(cudaBindTextureToArray(texCol, d_volumeArray[ecmType], channelDesc)); cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArrayCol; checkCudaErrors(cudaMallocArray(&d_transferFuncArrayCol, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpyToArray(d_transferFuncArrayCol, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTexCol.filterMode = cudaFilterModeLinear; transferTexCol.normalized = true; // access with normalized texture coordinates transferTexCol.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture checkCudaErrors(cudaBindTextureToArray(transferTexCol, d_transferFuncArrayCol, channelDesc2)); break; } } } #ifdef AVEP extern "C" void initCudaAVEP( void *h_volume, cudaExtent volumeSize, cudaMemcpy3DParms &copyParams, cudaMemcpy3DParms &svCopyParams, ecm_i ecmType) { initCuda(h_volume, volumeSize, copyParams, ecmType); #ifdef AVEP_INC // Allocate buffer device memory checkCudaErrors(cudaMalloc(&(d_svBuffer[ecmType]), volumeSize.width*volumeSize.height*volumeSize.depth*sizeof(VolumeType))); svCopyParams.dstPtr = make_cudaPitchedPtr( d_svBuffer[ecmType], volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); #else // AVEP_INC // Allocate buffer device memory checkCudaErrors(cudaMalloc(&(d_svBuffer[ecmType]), SV_W*SV_H*SV_D*sizeof(VolumeType))); svCopyParams.dstPtr = make_cudaPitchedPtr( d_svBuffer[ecmType], SV_W*sizeof(VolumeType), SV_W, SV_H); #endif // AVEP_INC // initialize copy params for sub-volumes svCopyParams.srcPtr = copyParams.srcPtr; svCopyParams.extent = make_cudaExtent(SV_W*sizeof(VolumeType), SV_H, SV_D); svCopyParams.kind = cudaMemcpyHostToDevice; } #endif extern "C" void freeCudaBuffers() { for(int ei = 0; ei < m_ecmtotal; ei++) { checkCudaErrors(cudaFreeArray(d_volumeArray[ei])); } checkCudaErrors(cudaFreeArray(d_transferFuncArrayCol)); checkCudaErrors(cudaFreeArray(d_transferFuncArrayEla)); checkCudaErrors(cudaFreeArray(d_transferFuncArrayHya)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void render_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int ecmChemType, bool isChem) { d_render_dim<<<gridSize, blockSize>>>(d_output, nx, ny, nz, imageW, imageH, density, brightness, transferOffset, transferScale, ecmChemType, isChem); } extern "C" void render_sp_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int gpu_id) { d_render_sp_dim<<<gridSize, blockSize>>>(d_output, nx, ny, nz, imageW, imageH, density, brightness, transferOffset, transferScale, gpu_id); } #ifdef ECV_SAMPLE_CHEM_TEST extern "C" void render_test_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, int 
chemType, bool isHigh) { d_render_test_dim<<<gridSize, blockSize>>>(d_output, nx, ny, nz, imageW, imageH, density, brightness, transferOffset, transferScale, chemType, isHigh); } #endif extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
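// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original file): one way a
// caller might drive the launch wrappers defined above. The iDivUp helper,
// the renderFrame name, and the parameter values are assumptions for this
// example; copyInvViewMatrix and render_kernel_dim are the wrappers above.
// ---------------------------------------------------------------------------
static uint iDivUp(uint a, uint b) { return (a + b - 1) / b; }

void renderFrame(uint *d_output,            // mapped imageW*imageH RGBA buffer
                 uint nx, uint ny, uint nz, // volume dimensions
                 uint imageW, uint imageH,
                 float *invViewMatrix)      // 3x4 row-major inverse view matrix
{
    // upload the inverse view matrix used for ray generation
    copyInvViewMatrix(invViewMatrix, 12 * sizeof(float));

    // one thread per output pixel, 16x16 blocks
    dim3 blockSize(16, 16);
    dim3 gridSize(iDivUp(imageW, blockSize.x), iDivUp(imageH, blockSize.y));

    // render the collagen ECM volume (isChem == false selects the ECM branch)
    render_kernel_dim(gridSize, blockSize, d_output, nx, ny, nz, imageW, imageH,
                      /*density=*/0.05f, /*brightness=*/1.0f,
                      /*transferOffset=*/0.0f, /*transferScale=*/1.0f,
                      /*ecmChemType=*/m_col, /*isChem=*/false);
}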
705e29fa2e588a1f5c77c2afc9db3940a415d9d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "hipcub/hipcub.hpp" #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class BatchNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); const bool is_test = ctx.Attr<bool>("is_test"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool trainable_stats = ctx.Attr<bool>("trainable_statistics"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_stats); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input<Tensor>("X"); const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5" "But received: the size of input's dimensions is [%d]", x_dims.size())); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); auto dtype = platform::CudnnDataType<T>::type; const bool fast_nhwc_batch_norm = test_mode || (dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; Tensor transformed_x(x->type()); Tensor transformed_y(y->type()); if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y, &transformed_y); } else { transformed_x.ShareDataWith(*x); transformed_y.ShareDataWith(*y); } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif // CUDNN_VERSION_MIN(7, 0, 1) VLOG(3) << "Setting descriptors."; std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * D * C, 1, W * D * C, D * C, C}; } PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_)); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. // It is training mode when it's not reference AND not using pre-trained // model. bool training = !test_mode && !use_global_stats; if (!training) { // only when test we use input to do computation. const auto *est_mean = ctx.Input<Tensor>("Mean"); const auto *est_var = ctx.Input<Tensor>("Variance"); // Run inference mode. PADDLE_ENFORCE_EQ( est_mean->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of mean's dimensions must equal to 1." "But received: the size of mean's dimensions mean is [%d]," "the dimensions of mean is [%s].", est_mean->dims().size(), est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of variance's dimensions must equal to 1." "But received: the size of variance's dimensions is [%d]," "the dimensions of variance is [%s].", est_var->dims().size(), est_var->dims())); PADDLE_ENFORCE_EQ( est_mean->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of mean must equal to the number of " "Channels, which is [%d]. 
But received: the first dimension" "of mean is [%d], the dimensions of mean is [%s].", C, est_mean->dims()[0], est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of variance must equal to the number" "of Channels, which is [%d]. But received: the first dimension of" "variance is [%d], the dimensions of variance is [%s].", C, est_var->dims()[0], est_var->dims())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), epsilon)); } else { // if MomentumTensor is set, use MomentumTensor value, momentum // is only used in this training branch if (ctx.HasInput("MomentumTensor")) { const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor"); Tensor mom_cpu; TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu); momentum = mom_cpu.data<float>()[0]; } // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0)); if ((N * H * W * D) == 1) { // Only 1 element in normalization dimension, // skip the batch norm calculation, let y = x. framework::TensorCopy(*x, ctx.GetPlace(), y); } else { double this_factor = 1. - momentum; bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; Tensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. 
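// Fused path for cuDNN >= 7.4.1: query the workspace and reserve-space sizes,
// allocate both, then call cudnnBatchNormalizationForwardTrainingEx. The
// reserve space is an op output so the backward pass can reuse it. If this
// path is unavailable, `called` stays false and the plain
// cudnnBatchNormalizationForwardTraining call below is used instead.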
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*zDesc=*/nullptr, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*activationDesc=*/nullptr, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space_ptr = reserve_space->mutable_data( ctx.GetPlace(), transformed_x.type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), nullptr, nullptr, data_desc_, transformed_y.template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), nullptr, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()))); } } } if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_y, y); } // clean when exit. 
PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, framework::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? 
(i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const hipStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream, layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void BNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, hipcub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T> class BatchNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); // batch_norm with inplace as false will take X as grad input, which // is same as cuDNN batch_norm backward calculation, batch_norm // with inplace as true only take Y as input and X should be calculate // by inverse operation of batch_norm on Y const Tensor *x; bool is_inplace; if (ctx.HasInput("Y")) { x = ctx.Input<Tensor>("Y"); is_inplace = true; PADDLE_ENFORCE_EQ(d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD not inplace in inplace mode")); } else { x = ctx.Input<Tensor>("X"); is_inplace = false; PADDLE_ENFORCE_NE(d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD inplaced in non-inplace mode")); } const bool is_test = ctx.Attr<bool>("is_test"); PADDLE_ENFORCE_EQ( is_test, false, platform::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output d_x->mutable_data<T>(ctx.GetPlace()); if (d_scale && d_bias) { d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); } PADDLE_ENFORCE_EQ( scale->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale->dims().size(), scale->dims())); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. 
But " "received: the first dimension of scale is [%d]", C, scale->dims()[0])); auto dtype = platform::CudnnDataType<T>::type; const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace"); const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && reserve_space != nullptr; auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; Tensor transformed_x(x->type()); Tensor transformed_d_y(d_y->type()); Tensor transformed_d_x(d_x->type()); if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x, &transformed_d_x); } else { transformed_x.ShareDataWith(*x); transformed_d_y.ShareDataWith(*d_y); transformed_d_x.ShareDataWith(*d_x); } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const int num = transformed_x.numel(); const int block = 512; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = ::min(C, max_blocks); auto stream = dev_ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif // CUDNN_VERSION_MIN(7, 0, 1) PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_scale && d_bias) { bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; void *workspace_ptr = nullptr; Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/transformed_d_x.template mutable_data<T>( ctx.GetPlace()), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/d_scale ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*dBnBiasData=*/d_bias ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>( reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, transformed_d_x.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), d_scale->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); } if (data_layout == DataLayout::kNHWC && compute_format 
== DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } } else { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, dev_ctx.stream(), d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } } } // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } else { const auto *running_mean = ctx.Input<Tensor>("Mean"); const auto *running_var = ctx.Input<Tensor>("Variance"); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = *x; inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x->data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData< T, framework::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias< T, block, framework::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData< T, framework::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias< T, block, framework::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } }; template <typename T> class BatchNormDoubleGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *X = ctx.Input<Tensor>("X"); const auto *Scale = ctx.Input<Tensor>("Scale"); const auto *dY = ctx.Input<Tensor>("DY"); const auto *Saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *Saved_variance = ctx.Input<Tensor>("SavedVariance"); const double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool is_test = ctx.Attr<bool>("is_test"); PADDLE_ENFORCE_EQ( is_test, false, platform::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. 
If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *ddX = ctx.Input<Tensor>("DDX"); const auto *ddScale = ctx.Input<Tensor>("DDScale"); const auto *ddBias = ctx.Input<Tensor>("DDBias"); auto *dX = ctx.Output<Tensor>("DX"); auto *dScale = ctx.Output<Tensor>("DScale"); auto *ddY = ctx.Output<Tensor>("DDY"); NormDoubleGradFunctor<platform::CUDADeviceContext, T>( ctx, data_layout, X, Scale, dY, Saved_mean, Saved_variance, epsilon, use_global_stats, ddX, ddScale, ddBias, dX, dScale, ddY); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, double>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, double>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad_grad, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, double>);
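// Illustrative note (not part of the file above): the backward kernels
// KeBNBackwardScaleBias and BNBackwardData reduce over j = 0 .. N*HxW-1 for a
// fixed channel i and select each element's flat offset with a layout-dependent
// formula. The host-side sketch below, with made-up tensor sizes, checks that
// this formula visits every element of every channel exactly once for both the
// NCHW and NHWC cases. It is a standalone verification sketch, not Paddle code.
#include <cassert>
#include <vector>

int main() {
  const int N = 2, C = 3, H = 4, W = 5;
  const int HxW = H * W;
  const int inner_size = N * HxW;               // j ranges over batch * spatial
  for (int nchw = 0; nchw < 2; ++nchw) {        // 0: NHWC, 1: NCHW
    std::vector<int> hits(N * C * HxW, 0);
    for (int i = 0; i < C; ++i) {               // channel index (outer_size = C)
      for (int j = 0; j < inner_size; ++j) {
        const int index = nchw ? (j / HxW * C + i) * HxW + j % HxW   // NCHW
                               : j * C + i;                          // NHWC
        ++hits[index];
      }
    }
    for (int v : hits) assert(v == 1);          // each element visited exactly once
  }
  return 0;
}
// The same reasoning applies to the per-element kernels above, where
// i / HxW % C (NCHW) or i % C (NHWC) recovers the channel of a flat index.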
705e29fa2e588a1f5c77c2afc9db3940a415d9d3.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <cfloat> #include <string> #include <vector> #include "cub/cub.cuh" #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/batch_norm_op.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace paddle { namespace operators { using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> using CudnnDataType = platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T> class BatchNormKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); float momentum = ctx.Attr<float>("momentum"); const bool is_test = ctx.Attr<bool>("is_test"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool trainable_stats = ctx.Attr<bool>("trainable_statistics"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); bool test_mode = is_test && (!trainable_stats); // Get the size for each dimension. // NCHW [batch_size, in_channels, in_height, in_width] const auto *x = ctx.Input<Tensor>("X"); const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5" "But received: the size of input's dimensions is [%d]", x_dims.size())); auto *y = ctx.Output<Tensor>("Y"); y->mutable_data<T>(ctx.GetPlace()); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); auto dtype = platform::CudnnDataType<T>::type; const bool fast_nhwc_batch_norm = test_mode || (dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; Tensor transformed_x(x->type()); Tensor transformed_y(y->type()); if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, y, &transformed_y); } else { transformed_x.ShareDataWith(*x); transformed_y.ShareDataWith(*y); } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif // CUDNN_VERSION_MIN(7, 0, 1) VLOG(3) << "Setting descriptors."; std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * D * C, 1, W * D * C, D * C, C}; } PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); // Note: PERSISTENT not implemented for inference PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, test_mode ? CUDNN_BATCHNORM_SPATIAL : mode_)); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto handle = dev_ctx.cudnn_handle(); // Now, depending on whether we are running test or not, we have two paths. // It is training mode when it's not reference AND not using pre-trained // model. bool training = !test_mode && !use_global_stats; if (!training) { // only when test we use input to do computation. const auto *est_mean = ctx.Input<Tensor>("Mean"); const auto *est_var = ctx.Input<Tensor>("Variance"); // Run inference mode. PADDLE_ENFORCE_EQ( est_mean->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of mean's dimensions must equal to 1." "But received: the size of mean's dimensions mean is [%d]," "the dimensions of mean is [%s].", est_mean->dims().size(), est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of variance's dimensions must equal to 1." "But received: the size of variance's dimensions is [%d]," "the dimensions of variance is [%s].", est_var->dims().size(), est_var->dims())); PADDLE_ENFORCE_EQ( est_mean->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of mean must equal to the number of " "Channels, which is [%d]. 
But received: the first dimension" "of mean is [%d], the dimensions of mean is [%s].", C, est_mean->dims()[0], est_mean->dims())); PADDLE_ENFORCE_EQ( est_var->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of variance must equal to the number" "of Channels, which is [%d]. But received: the first dimension of" "variance is [%d], the dimensions of variance is [%s].", C, est_var->dims()[0], est_var->dims())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardInference( handle, // Note: PERSISTENT not implemented for inference CUDNN_BATCHNORM_SPATIAL, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), est_mean->template data<BatchNormParamType<T>>(), est_var->template data<BatchNormParamType<T>>(), epsilon)); } else { // if MomentumTensor is set, use MomentumTensor value, momentum // is only used in this training branch if (ctx.HasInput("MomentumTensor")) { const auto *mom_tensor = ctx.Input<Tensor>("MomentumTensor"); Tensor mom_cpu; TensorCopySync(*mom_tensor, platform::CPUPlace(), &mom_cpu); momentum = mom_cpu.data<float>()[0]; } // Run training mode. // obtain running mean and running inv var, and see if we need to // initialize them. auto *mean_out = ctx.Output<Tensor>("MeanOut"); auto *variance_out = ctx.Output<Tensor>("VarianceOut"); mean_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); variance_out->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); auto *saved_mean = ctx.Output<Tensor>("SavedMean"); auto *saved_variance = ctx.Output<Tensor>("SavedVariance"); saved_mean->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); saved_variance->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, saved_mean, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, saved_variance, static_cast<BatchNormParamType<T>>(0)); if ((N * H * W * D) == 1) { // Only 1 element in normalization dimension, // skip the batch norm calculation, let y = x. framework::TensorCopy(*x, ctx.GetPlace(), y); } else { double this_factor = 1. - momentum; bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; size_t reserve_space_size = 0; void *reserve_space_ptr = nullptr; void *workspace_ptr = nullptr; Tensor workspace_tensor; // Create reserve space and workspace for batch norm. // Create tensor for each batchnorm op, it will be used in the // backward. Thus this tensor shouldn't be temp. 
auto *reserve_space = ctx.Output<Tensor>("ReserveSpace"); PADDLE_ENFORCE_NOT_NULL( reserve_space, platform::errors::NotFound( "The argument ReserveSpace of batch_norm op is not found.")); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationForwardTrainingExWorkspaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*zDesc=*/nullptr, /*yDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); // -------------- cudnn batchnorm reserve space -------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationTrainingExReserveSpaceSize( /*handle=*/handle, /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*activationDesc=*/nullptr, /*xDesc=*/data_desc_, /*sizeInBytes=*/&reserve_space_size)); reserve_space_ptr = reserve_space->mutable_data( ctx.GetPlace(), transformed_x.type(), reserve_space_size); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTrainingEx( handle, mode_, CUDNN_BATCHNORM_OPS_BN, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), nullptr, nullptr, data_desc_, transformed_y.template data<T>(), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), nullptr, workspace_ptr, workspace_size, reserve_space_ptr, reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationForwardTraining( handle, mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_y.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), this_factor, mean_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), variance_out->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), saved_variance->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()))); } } } if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_y, y); } // clean when exit. 
PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, framework::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == framework::DataLayout::kNCHW ? 
(i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const framework::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const cudaStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, platform::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); KeBNRestoreData<<<grid2, block, 0, stream>>>( layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, framework::DataLayout layout> static __global__ void BNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, cub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == framework::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T> class BatchNormGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument("It must use CUDAPlace.")); double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y")); const auto *scale = ctx.Input<Tensor>("Scale"); const auto *bias = ctx.Input<Tensor>("Bias"); auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X")); auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale")); auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias")); // batch_norm with inplace as false will take X as grad input, which // is same as cuDNN batch_norm backward calculation, batch_norm // with inplace as true only take Y as input and X should be calculate // by inverse operation of batch_norm on Y const Tensor *x; bool is_inplace; if (ctx.HasInput("Y")) { x = ctx.Input<Tensor>("Y"); is_inplace = true; PADDLE_ENFORCE_EQ(d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD not inplace in inplace mode")); } else { x = ctx.Input<Tensor>("X"); is_inplace = false; PADDLE_ENFORCE_NE(d_x, d_y, platform::errors::InvalidArgument( "X@GRAD and Y@GRAD inplaced in non-inplace mode")); } const bool is_test = ctx.Attr<bool>("is_test"); PADDLE_ENFORCE_EQ( is_test, false, platform::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const auto &x_dims = x->dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, platform::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output d_x->mutable_data<T>(ctx.GetPlace()); if (d_scale && d_bias) { d_scale->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); d_bias->mutable_data<BatchNormParamType<T>>(ctx.GetPlace()); } PADDLE_ENFORCE_EQ( scale->dims().size(), 1UL, platform::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale->dims().size(), scale->dims())); PADDLE_ENFORCE_EQ( scale->dims()[0], C, platform::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. 
But " "received: the first dimension of scale is [%d]", C, scale->dims()[0])); auto dtype = platform::CudnnDataType<T>::type; const auto *reserve_space = ctx.Input<Tensor>("ReserveSpace"); const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && reserve_space != nullptr; auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; Tensor transformed_x(x->type()); Tensor transformed_d_y(d_y->type()); Tensor transformed_d_x(d_x->type()); if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, x, &transformed_x); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_y, &transformed_d_y); ResizeToChannelFirst<platform::CUDADeviceContext, T>(ctx, d_x, &transformed_d_x); } else { transformed_x.ShareDataWith(*x); transformed_d_y.ShareDataWith(*d_y); transformed_d_x.ShareDataWith(*d_x); } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); const int num = transformed_x.numel(); const int block = 512; int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); auto stream = dev_ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); math::SetConstant<platform::CUDADeviceContext, BatchNormParamType<T>> functor; functor(dev_ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(dev_ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnCreateTensorDescriptor(&bn_param_desc_)); if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #if CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else mode_ = CUDNN_BATCHNORM_SPATIAL; #endif // CUDNN_VERSION_MIN(7, 0, 1) PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? 
x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDeriveBNTensorDescriptor(bn_param_desc_, data_desc_, mode_)); const auto *saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *saved_var = ctx.Input<Tensor>("SavedVariance"); const auto *saved_mean_data = saved_mean->template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_var->template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_scale && d_bias) { bool called = false; #if CUDNN_VERSION_MIN(7, 4, 1) called = true; size_t workspace_size = 0; void *workspace_ptr = nullptr; Tensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_ptr = workspace_tensor.mutable_data( ctx.GetPlace(), transformed_x.type(), workspace_size); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/dev_ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/transformed_d_x.template mutable_data<T>( ctx.GetPlace()), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale->template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/d_scale ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*dBnBiasData=*/d_bias ->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/const_cast<T *>( reserve_space->template data<T>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #endif // CUDNN_VERSION_MIN(7, 4, 1) if (!called) { PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, transformed_d_x.template mutable_data<T>(ctx.GetPlace()), bn_param_desc_, scale->template data<BatchNormParamType<T>>(), d_scale->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), d_bias->template mutable_data<BatchNormParamType<T>>( ctx.GetPlace()), epsilon, saved_mean_data, saved_var_data)); } if (data_layout == DataLayout::kNHWC && compute_format 
== DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<paddle::platform::CUDADeviceContext, T>( ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { BNBackwardData<T, block, framework::DataLayout::kNCHW><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } } else { if (d_x) { BNBackwardData<T, block, framework::DataLayout::kNHWC><<< grid2, block, 0, dev_ctx.stream()>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), saved_mean_data, x->data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } } } // clean when exit. PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_CUDA_SUCCESS( platform::dynload::cudnnDestroyTensorDescriptor(bn_param_desc_)); } else { const auto *running_mean = ctx.Input<Tensor>("Mean"); const auto *running_var = ctx.Input<Tensor>("Variance"); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = *x; inplace_functor(data_layout, px.mutable_data<T>(ctx.GetPlace()), scale->template data<BatchNormParamType<T>>(), bias->template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x->data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { KeBNBackwardData< T, framework::DataLayout::kNCHW><<<grid1, block, 0, stream>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNCHW><<<grid2, block, 0, stream>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { KeBNBackwardData< T, framework::DataLayout::kNHWC><<<grid1, block, 0, stream>>>( d_y->data<T>(), scale->data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias< T, block, framework::DataLayout::kNHWC><<<grid2, block, 0, stream>>>( d_y->data<T>(), x->data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } }; template <typename T> class BatchNormDoubleGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { const auto *X = ctx.Input<Tensor>("X"); const auto *Scale = ctx.Input<Tensor>("Scale"); const auto *dY = ctx.Input<Tensor>("DY"); const auto *Saved_mean = ctx.Input<Tensor>("SavedMean"); const auto *Saved_variance = ctx.Input<Tensor>("SavedVariance"); const double epsilon = static_cast<double>(ctx.Attr<float>("epsilon")); const bool use_global_stats = ctx.Attr<bool>("use_global_stats"); const bool is_test = ctx.Attr<bool>("is_test"); PADDLE_ENFORCE_EQ( is_test, false, platform::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. 
If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); const auto *ddX = ctx.Input<Tensor>("DDX"); const auto *ddScale = ctx.Input<Tensor>("DDScale"); const auto *ddBias = ctx.Input<Tensor>("DDBias"); auto *dX = ctx.Output<Tensor>("DX"); auto *dScale = ctx.Output<Tensor>("DScale"); auto *ddY = ctx.Output<Tensor>("DDY"); NormDoubleGradFunctor<platform::CUDADeviceContext, T>( ctx, data_layout, X, Scale, dY, Saved_mean, Saved_variance, epsilon, use_global_stats, ddX, ddScale, ddBias, dX, dScale, ddY); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( batch_norm, ops::BatchNormKernel<plat::CUDADeviceContext, float>, ops::BatchNormKernel<plat::CUDADeviceContext, double>, ops::BatchNormKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad, ops::BatchNormGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormGradKernel<plat::CUDADeviceContext, double>, ops::BatchNormGradKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( batch_norm_grad_grad, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, float>, ops::BatchNormDoubleGradKernel<plat::CUDADeviceContext, double>);
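// Illustrative note (not part of the files above): the .cu source above and its
// hipified .hip counterpart earlier differ mainly in kernel-launch syntax. A
// CUDA triple-chevron launch such as
//   KeBNBackwardData<T, framework::DataLayout::kNCHW><<<grid1, block, 0, stream>>>(...);
// becomes
//   hipLaunchKernelGGL((KeBNBackwardData<T, framework::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream, ...);
// The toy program below (its kernel, names and sizes are invented for
// illustration, not taken from the operator) writes the same launch both ways
// behind a compiler check, so it builds with either nvcc or hipcc.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>
#else
#include <cuda_runtime.h>
#endif
#include <cstdio>

__global__ void fill_kernel(float *out, int n, float value) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = value;
}

int main() {
  const int n = 1024, block = 256, grid = (n + block - 1) / block;
  float *d = nullptr;
#ifdef __HIPCC__
  hipMalloc(&d, n * sizeof(float));
  // HIP form, analogous to the hipLaunchKernelGGL calls in the .hip file.
  hipLaunchKernelGGL(fill_kernel, dim3(grid), dim3(block), 0, 0, d, n, 1.0f);
  hipDeviceSynchronize();
  hipFree(d);
#else
  cudaMalloc(&d, n * sizeof(float));
  // CUDA form, analogous to the triple-chevron launches in the .cu file.
  fill_kernel<<<grid, block, 0, 0>>>(d, n, 1.0f);
  cudaDeviceSynchronize();
  cudaFree(d);
#endif
  printf("launched %d block(s) of %d threads\n", grid, block);
  return 0;
}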
2bff56d92e2a2e113c89ff48082ee429560231a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/clip2img_layer.hpp" namespace caffe { template <typename Dtype> __global__ void clip2img_forward(const int nthreads, const Dtype* bottom, const int num, const int channels, const int depth, const int height, const int width, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % width; const int h = index / width % height; const int d = index / width / height % depth; const int c = index / width / height / depth % channels; const int n = index / width / height / depth / channels; int out_index = (n * depth + d) * channels * height * width + c * height * width + h * width + w; top[out_index] = bottom[index]; } } template <typename Dtype> void Clip2ImgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int nthreads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( clip2img_forward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom[0]->gpu_data(), bottom_axes_[0], bottom_axes_[1], bottom_axes_[2], bottom_axes_[3], bottom_axes_[4], top[0]->mutable_gpu_data()); } template <typename Dtype> __global__ void clip2img_backward( const int nthreads, Dtype* bottom, const int num, const int channels, const int depth, const int height, const int width, const Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % width; const int h = index / width % height; const int d = index / width / height % depth; const int c = index / width / height / depth % channels; const int n = index / width / height / depth / channels; int top_index = (n * depth + d) * channels * height * width + c * height * width + h * width + w; bottom[index] = top[top_index]; } } template <typename Dtype> void Clip2ImgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int nthreads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( clip2img_backward<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom[0]->mutable_gpu_diff(), bottom_axes_[0], bottom_axes_[1], bottom_axes_[2], bottom_axes_[3], bottom_axes_[4], top[0]->gpu_diff()); } INSTANTIATE_LAYER_GPU_FUNCS(Clip2ImgLayer); } // namespace caffe
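// Illustrative note (not part of the layer above): clip2img_forward folds the
// depth axis into the batch axis, i.e. input element (n, c, d, h, w) lands at
// output position ((n * depth + d), c, h, w). The host-side sketch below uses
// arbitrary test sizes (it is not Caffe code) to replay the same index
// arithmetic and check that the mapping fills every output slot exactly once.
#include <cassert>
#include <vector>

int main() {
  const int num = 2, channels = 3, depth = 4, height = 2, width = 5;
  const int count = num * channels * depth * height * width;
  std::vector<int> bottom(count), top(count, -1);
  for (int i = 0; i < count; ++i) bottom[i] = i;

  for (int index = 0; index < count; ++index) {
    // Same decomposition and out_index formula as the GPU kernel above.
    const int w = index % width;
    const int h = index / width % height;
    const int d = index / width / height % depth;
    const int c = index / width / height / depth % channels;
    const int n = index / width / height / depth / channels;
    const int out_index = (n * depth + d) * channels * height * width +
                          c * height * width + h * width + w;
    top[out_index] = bottom[index];
  }
  for (int v : top) assert(v >= 0);  // every output slot was written: bijection
  return 0;
}
// The backward kernel simply inverts this copy (bottom[index] = top[top_index]),
// as clip2img_backward does above.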
2bff56d92e2a2e113c89ff48082ee429560231a4.cu
#include <vector> #include "caffe/layers/clip2img_layer.hpp" namespace caffe { template <typename Dtype> __global__ void clip2img_forward(const int nthreads, const Dtype* bottom, const int num, const int channels, const int depth, const int height, const int width, Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % width; const int h = index / width % height; const int d = index / width / height % depth; const int c = index / width / height / depth % channels; const int n = index / width / height / depth / channels; int out_index = (n * depth + d) * channels * height * width + c * height * width + h * width + w; top[out_index] = bottom[index]; } } template <typename Dtype> void Clip2ImgLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int nthreads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) clip2img_forward<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom[0]->gpu_data(), bottom_axes_[0], bottom_axes_[1], bottom_axes_[2], bottom_axes_[3], bottom_axes_[4], top[0]->mutable_gpu_data()); } template <typename Dtype> __global__ void clip2img_backward( const int nthreads, Dtype* bottom, const int num, const int channels, const int depth, const int height, const int width, const Dtype* top) { CUDA_KERNEL_LOOP(index, nthreads) { const int w = index % width; const int h = index / width % height; const int d = index / width / height % depth; const int c = index / width / height / depth % channels; const int n = index / width / height / depth / channels; int top_index = (n * depth + d) * channels * height * width + c * height * width + h * width + w; bottom[index] = top[top_index]; } } template <typename Dtype> void Clip2ImgLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int nthreads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) clip2img_backward<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, bottom[0]->mutable_gpu_diff(), bottom_axes_[0], bottom_axes_[1], bottom_axes_[2], bottom_axes_[3], bottom_axes_[4], top[0]->gpu_diff()); } INSTANTIATE_LAYER_GPU_FUNCS(Clip2ImgLayer); } // namespace caffe
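The clip2img kernels above only permute indices: a bottom blob laid out as N x C x D x H x W is copied into a top blob laid out as (N*D) x C x H x W. Below is a minimal host-side sketch of that same index mapping, useful for sanity-checking the GPU output; it is not part of the Caffe layer, and the function name clip2img_forward_cpu is illustrative only.

#include <vector>

// Hypothetical CPU reference mirroring clip2img_forward's index arithmetic.
// Assumes bottom is N x C x D x H x W and top is (N*D) x C x H x W, both contiguous.
void clip2img_forward_cpu(const std::vector<float>& bottom, int num, int channels,
                          int depth, int height, int width, std::vector<float>& top) {
  top.resize(bottom.size());
  for (int n = 0; n < num; ++n)
    for (int c = 0; c < channels; ++c)
      for (int d = 0; d < depth; ++d)
        for (int h = 0; h < height; ++h)
          for (int w = 0; w < width; ++w) {
            const int in_index  = (((n * channels + c) * depth + d) * height + h) * width + w;
            const int out_index = ((n * depth + d) * channels + c) * height * width + h * width + w;
            top[out_index] = bottom[in_index];
          }
}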
9400b03561ca790d7a2273b3a29b0e552dc222e4.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hip/hip_runtime.h> #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_cuda_utils.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { namespace math { template <typename T, int TPB> __device__ inline void LayerNormSmall(T val, const kvp<T> &thread_data, const int ld, const int idx, const float *bias, const float *scale, T *output, T eps) { using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum()); if (threadIdx.x == 0) { mu = sum_kv.key; rsigma = rsqrt(sum_kv.value - mu * mu + eps); } __syncthreads(); if (threadIdx.x < ld) { const T g(scale[threadIdx.x]); const T b(bias[threadIdx.x]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, int TPB> __device__ inline void LayerNorm(const kvp<T> &thread_data, const int ld, const int offset, const float *bias, const float *scale, T *output, T eps) { using BlockReduce = hipcub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. 
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum()); if (threadIdx.x == 0) { mu = sum_kv.key; rsigma = rsqrt(sum_kv.value - mu * mu + eps); } __syncthreads(); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = output[idx]; const T g(scale[i]); const T b(bias[i]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, unsigned TPB> __global__ void EmbEltwiseLayernormKernel(int hidden, const int64_t *ids, const float *scale, const float *bias, const int64_t *embs, T *output, float eps, int input_num) { hipcub::Sum pair_sum; // blockIdx.x: position in the sequence // blockIdx.y: batch // gridDim.x: Seq // gridDim.y: Batch extern __shared__ int64_t array_id[]; const T rhidden = T(1.f) / T(hidden); const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y; if (threadIdx.x == 0) { for (int i = 0; i < input_num; ++i) { const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]); array_id[i] = ids_p[seq_pos]; } } __syncthreads(); const int64_t out_offset = seq_pos * hidden; kvp<T> thread_data(0, 0); #pragma unroll for (int it = threadIdx.x; it < hidden; it += TPB) { T val = 0; for (int i = 0; i < input_num; ++i) { val += reinterpret_cast<const T *>(embs[i])[array_id[i] * hidden + it]; } output[out_offset + it] = val; const T rhiddenval = rhidden * val; thread_data = pair_sum(thread_data, kvp<T>(rhiddenval, rhiddenval * val)); } LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps); } template <typename T> void EmbEltwiseLayerNormFunctor<T>::operator()( int batch, int seq_len, int hidden, const int64_t *ids, const float *scale, const float *bias, const int64_t *embs, T *output, float eps, int input_num, hipStream_t stream) { const unsigned tpb = 256; const dim3 grid(seq_len, batch, 1); const dim3 block(tpb, 1, 1); int shared_bytes = input_num * sizeof(int64_t); hipLaunchKernelGGL(( EmbEltwiseLayernormKernel<T, tpb>), dim3(grid), dim3(block), shared_bytes, stream, hidden, ids, scale, bias, embs, output, eps, input_num); } template class EmbEltwiseLayerNormFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayerNormFunctor<half>; #endif template <typename T> __global__ void SoftmaxKernelWithEltadd(T *qk_buf_, const T *bias_qk_, const int batch_size, const int head_num, const int seq_len, const unsigned mask) { int qk_offset = blockIdx.x * seq_len; assert(blockDim.x % 32 == 0); __shared__ float s_sum, s_max; float qk = threadIdx.x < seq_len ? static_cast<float>((qk_buf_[threadIdx.x + qk_offset] + bias_qk_[threadIdx.x + qk_offset])) : 0.0f; float tmp = threadIdx.x < seq_len ? static_cast<float>(qk) : -1e20f; float max_val = blockReduceMax<float>(tmp, mask); if (threadIdx.x == 0) s_max = max_val; __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? __expf(static_cast<float>(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp, mask); if (threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if (threadIdx.x < seq_len) qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum); } template <typename T> inline void MatMulWithHeadQK(const platform::CUDADeviceContext &context, int head_num, int seq_len, int size_per_head, int batch_size, bool q_trans, bool k_trans, T *q_buf_, T *k_buf_, T *qk_buf_, const T *bias_qk, T alpha, T beta) { CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = !k_trans ? 
CblasNoTrans : CblasTrans; typedef typename CUDATypeTraits<T>::TYPE run_type; auto blas = operators::math::GetBlas<platform::CUDADeviceContext, run_type>(context); auto stream = context.stream(); blas.BatchedGEMM( transA, transB, seq_len, seq_len, size_per_head, static_cast<run_type>(alpha), reinterpret_cast<run_type *>(q_buf_), reinterpret_cast<run_type *>(k_buf_), static_cast<run_type>(beta), reinterpret_cast<run_type *>(qk_buf_), batch_size * head_num, seq_len * size_per_head, seq_len * size_per_head); int grid = batch_size * head_num * seq_len; int block = seq_len; // Align block to 32, also limit seq_len to max block size. PADDLE_ENFORCE_LE(seq_len, 1024, platform::errors::InvalidArgument( "seq_len should <= 1024, " "but received seq_len is:%d", seq_len)); if (seq_len <= 32) block = 32; else if (seq_len > 32 && seq_len <= 64) block = 64; else if (seq_len > 64 && seq_len <= 128) block = 128; else if (seq_len > 128 && seq_len <= 256) block = 256; else if (seq_len > 256 && seq_len <= 512) block = 512; else block = 1024; hipLaunchKernelGGL(( SoftmaxKernelWithEltadd<T>), dim3(grid), dim3(block), 0, stream, qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK); } template <typename T> inline void MatMulWithHeadQKV(const platform::CUDADeviceContext &context, int head_num, int seq_len, int size_per_head, int batch_size, bool qk_trans, bool v_trans, T *v_buf_, const T *qk_buf_, T *dst, T alpha, T beta) { int m = batch_size * seq_len; int k = head_num * size_per_head; typedef typename CUDATypeTraits<T>::TYPE run_type; auto blas = operators::math::GetBlas<platform::CUDADeviceContext, run_type>(context); auto stream = context.stream(); CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans; blas.BatchedGEMM( transA, transB, seq_len, size_per_head, seq_len, static_cast<run_type>(alpha), reinterpret_cast<const run_type *>(qk_buf_), reinterpret_cast<run_type *>(v_buf_), static_cast<run_type>(beta), reinterpret_cast<run_type *>(dst), batch_size * head_num, seq_len * seq_len, seq_len * size_per_head); } template <typename T> void MultiHeadGPUComputeFunctor<T>::operator()( const platform::CUDADeviceContext &dev_ctx, int batch, int seq_len, int head_num, int head_size, T *qkptr, const T *bias_qk_ptr, T *tptr, T alpha, T beta) { auto stream = dev_ctx.stream(); const int tsize = batch * head_num * seq_len * head_size; T *qptr = tptr; T *kptr = qptr + tsize; T *vptr = kptr + tsize; // batch gemm stride, softmaxwithscale. MatMulWithHeadQK<T>(dev_ctx, head_num, seq_len, head_size, batch, false, true, qptr, kptr, qkptr, bias_qk_ptr, alpha, beta); // batch gemm stride, transpose. 
MatMulWithHeadQKV<T>(dev_ctx, head_num, seq_len, head_size, batch, false, false, vptr, qkptr, tptr, T(1.0), beta); } template class MultiHeadGPUComputeFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class MultiHeadGPUComputeFunctor<half>; #endif template <typename T, unsigned TPB> __global__ void SkipLayerNormSmallKernel(int num, int hidden, const T *input1, const T *input2, T *output, const float *scale, const float *bias, float eps) { const T rld = T(1) / T(hidden); const int offset = blockIdx.x * hidden; hipcub::Sum pair_sum; kvp<T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < hidden) { val = input1[idx] + input2[idx]; const T rldval = rld * val; thread_data = pair_sum(thread_data, kvp<T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, hidden, idx, bias, scale, output, eps); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel(int num, int hidden, const T *input1, const T *input2, T *output, const float *scale, const float *bias, float eps) { const T rld = T(1) / T(hidden); const int offset = blockIdx.x * hidden; hipcub::Sum pair_sum; kvp<T> thread_data(0, 0); for (int it = threadIdx.x; it < hidden; it += TPB) { const int idx = offset + it; const T val = input1[idx] + input2[idx]; const T rldval = rld * val; thread_data = pair_sum(thread_data, kvp<T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, hidden, offset, bias, scale, output, eps); } template <typename T> void SkipLayerNormFunctor<T>::operator()(const int num, const int hidden, const T *input1, const T *input2, const float *scale, const float *bias, T *output, T eps, hipStream_t stream) { int block = num / hidden; if (hidden <= 32) { const int threads = 32; hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream, num, hidden, input1, input2, output, scale, bias, eps); } else if (hidden <= 128) { const int threads = 128; hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream, num, hidden, input1, input2, output, scale, bias, eps); } else if (hidden == 384) { const int threads = 384; hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream, num, hidden, input1, input2, output, scale, bias, eps); } else { const int threads = 256; hipLaunchKernelGGL(( SkipLayerNormKernel<T, threads>), dim3(block), dim3(threads), 0, stream, num, hidden, input1, input2, output, scale, bias, eps); } } template class SkipLayerNormFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class SkipLayerNormFunctor<half>; #endif } // namespace math } // namespace operators } // namespace paddle
9400b03561ca790d7a2273b3a29b0e552dc222e4.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cuda_runtime.h> #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_cuda_utils.h" #include "paddle/fluid/platform/enforce.h" namespace paddle { namespace operators { namespace math { template <typename T, int TPB> __device__ inline void LayerNormSmall(T val, const kvp<T> &thread_data, const int ld, const int idx, const float *bias, const float *scale, T *output, T eps) { using BlockReduce = cub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum()); if (threadIdx.x == 0) { mu = sum_kv.key; rsigma = rsqrt(sum_kv.value - mu * mu + eps); } __syncthreads(); if (threadIdx.x < ld) { const T g(scale[threadIdx.x]); const T b(bias[threadIdx.x]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, int TPB> __device__ inline void LayerNorm(const kvp<T> &thread_data, const int ld, const int offset, const float *bias, const float *scale, T *output, T eps) { using BlockReduce = cub::BlockReduce<kvp<T>, TPB>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T mu; // mean __shared__ T rsigma; // 1 / std.dev. 
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum()); if (threadIdx.x == 0) { mu = sum_kv.key; rsigma = rsqrt(sum_kv.value - mu * mu + eps); } __syncthreads(); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = output[idx]; const T g(scale[i]); const T b(bias[i]); output[idx] = g * (val - mu) * rsigma + b; } } template <typename T, unsigned TPB> __global__ void EmbEltwiseLayernormKernel(int hidden, const int64_t *ids, const float *scale, const float *bias, const int64_t *embs, T *output, float eps, int input_num) { cub::Sum pair_sum; // blockIdx.x: position in the sequence // blockIdx.y: batch // gridDim.x: Seq // gridDim.y: Batch extern __shared__ int64_t array_id[]; const T rhidden = T(1.f) / T(hidden); const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y; if (threadIdx.x == 0) { for (int i = 0; i < input_num; ++i) { const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]); array_id[i] = ids_p[seq_pos]; } } __syncthreads(); const int64_t out_offset = seq_pos * hidden; kvp<T> thread_data(0, 0); #pragma unroll for (int it = threadIdx.x; it < hidden; it += TPB) { T val = 0; for (int i = 0; i < input_num; ++i) { val += reinterpret_cast<const T *>(embs[i])[array_id[i] * hidden + it]; } output[out_offset + it] = val; const T rhiddenval = rhidden * val; thread_data = pair_sum(thread_data, kvp<T>(rhiddenval, rhiddenval * val)); } LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps); } template <typename T> void EmbEltwiseLayerNormFunctor<T>::operator()( int batch, int seq_len, int hidden, const int64_t *ids, const float *scale, const float *bias, const int64_t *embs, T *output, float eps, int input_num, cudaStream_t stream) { const unsigned tpb = 256; const dim3 grid(seq_len, batch, 1); const dim3 block(tpb, 1, 1); int shared_bytes = input_num * sizeof(int64_t); EmbEltwiseLayernormKernel<T, tpb><<<grid, block, shared_bytes, stream>>>( hidden, ids, scale, bias, embs, output, eps, input_num); } template class EmbEltwiseLayerNormFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class EmbEltwiseLayerNormFunctor<half>; #endif template <typename T> __global__ void SoftmaxKernelWithEltadd(T *qk_buf_, const T *bias_qk_, const int batch_size, const int head_num, const int seq_len, const unsigned mask) { int qk_offset = blockIdx.x * seq_len; assert(blockDim.x % 32 == 0); __shared__ float s_sum, s_max; float qk = threadIdx.x < seq_len ? static_cast<float>((qk_buf_[threadIdx.x + qk_offset] + bias_qk_[threadIdx.x + qk_offset])) : 0.0f; float tmp = threadIdx.x < seq_len ? static_cast<float>(qk) : -1e20f; float max_val = blockReduceMax<float>(tmp, mask); if (threadIdx.x == 0) s_max = max_val; __syncthreads(); float qk_tmp = threadIdx.x < seq_len ? __expf(static_cast<float>(tmp - s_max)) : 0.0f; float sum_val = blockReduceSum<float>(qk_tmp, mask); if (threadIdx.x == 0) { s_sum = sum_val + 1e-6f; } __syncthreads(); if (threadIdx.x < seq_len) qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum); } template <typename T> inline void MatMulWithHeadQK(const platform::CUDADeviceContext &context, int head_num, int seq_len, int size_per_head, int batch_size, bool q_trans, bool k_trans, T *q_buf_, T *k_buf_, T *qk_buf_, const T *bias_qk, T alpha, T beta) { CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = !k_trans ? 
CblasNoTrans : CblasTrans; typedef typename CUDATypeTraits<T>::TYPE run_type; auto blas = operators::math::GetBlas<platform::CUDADeviceContext, run_type>(context); auto stream = context.stream(); blas.BatchedGEMM( transA, transB, seq_len, seq_len, size_per_head, static_cast<run_type>(alpha), reinterpret_cast<run_type *>(q_buf_), reinterpret_cast<run_type *>(k_buf_), static_cast<run_type>(beta), reinterpret_cast<run_type *>(qk_buf_), batch_size * head_num, seq_len * size_per_head, seq_len * size_per_head); int grid = batch_size * head_num * seq_len; int block = seq_len; // Align block to 32, also limit seq_len to max block size. PADDLE_ENFORCE_LE(seq_len, 1024, platform::errors::InvalidArgument( "seq_len should <= 1024, " "but received seq_len is:%d", seq_len)); if (seq_len <= 32) block = 32; else if (seq_len > 32 && seq_len <= 64) block = 64; else if (seq_len > 64 && seq_len <= 128) block = 128; else if (seq_len > 128 && seq_len <= 256) block = 256; else if (seq_len > 256 && seq_len <= 512) block = 512; else block = 1024; SoftmaxKernelWithEltadd<T><<<grid, block, 0, stream>>>( qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK); } template <typename T> inline void MatMulWithHeadQKV(const platform::CUDADeviceContext &context, int head_num, int seq_len, int size_per_head, int batch_size, bool qk_trans, bool v_trans, T *v_buf_, const T *qk_buf_, T *dst, T alpha, T beta) { int m = batch_size * seq_len; int k = head_num * size_per_head; typedef typename CUDATypeTraits<T>::TYPE run_type; auto blas = operators::math::GetBlas<platform::CUDADeviceContext, run_type>(context); auto stream = context.stream(); CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans; CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans; blas.BatchedGEMM( transA, transB, seq_len, size_per_head, seq_len, static_cast<run_type>(alpha), reinterpret_cast<const run_type *>(qk_buf_), reinterpret_cast<run_type *>(v_buf_), static_cast<run_type>(beta), reinterpret_cast<run_type *>(dst), batch_size * head_num, seq_len * seq_len, seq_len * size_per_head); } template <typename T> void MultiHeadGPUComputeFunctor<T>::operator()( const platform::CUDADeviceContext &dev_ctx, int batch, int seq_len, int head_num, int head_size, T *qkptr, const T *bias_qk_ptr, T *tptr, T alpha, T beta) { auto stream = dev_ctx.stream(); const int tsize = batch * head_num * seq_len * head_size; T *qptr = tptr; T *kptr = qptr + tsize; T *vptr = kptr + tsize; // batch gemm stride, softmaxwithscale. MatMulWithHeadQK<T>(dev_ctx, head_num, seq_len, head_size, batch, false, true, qptr, kptr, qkptr, bias_qk_ptr, alpha, beta); // batch gemm stride, transpose. 
MatMulWithHeadQKV<T>(dev_ctx, head_num, seq_len, head_size, batch, false, false, vptr, qkptr, tptr, T(1.0), beta); } template class MultiHeadGPUComputeFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class MultiHeadGPUComputeFunctor<half>; #endif template <typename T, unsigned TPB> __global__ void SkipLayerNormSmallKernel(int num, int hidden, const T *input1, const T *input2, T *output, const float *scale, const float *bias, float eps) { const T rld = T(1) / T(hidden); const int offset = blockIdx.x * hidden; cub::Sum pair_sum; kvp<T> thread_data(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < hidden) { val = input1[idx] + input2[idx]; const T rldval = rld * val; thread_data = pair_sum(thread_data, kvp<T>(rldval, rldval * val)); } LayerNormSmall<T, TPB>(val, thread_data, hidden, idx, bias, scale, output, eps); } template <typename T, unsigned TPB> __global__ void SkipLayerNormKernel(int num, int hidden, const T *input1, const T *input2, T *output, const float *scale, const float *bias, float eps) { const T rld = T(1) / T(hidden); const int offset = blockIdx.x * hidden; cub::Sum pair_sum; kvp<T> thread_data(0, 0); for (int it = threadIdx.x; it < hidden; it += TPB) { const int idx = offset + it; const T val = input1[idx] + input2[idx]; const T rldval = rld * val; thread_data = pair_sum(thread_data, kvp<T>(rldval, rldval * val)); output[idx] = val; } LayerNorm<T, TPB>(thread_data, hidden, offset, bias, scale, output, eps); } template <typename T> void SkipLayerNormFunctor<T>::operator()(const int num, const int hidden, const T *input1, const T *input2, const float *scale, const float *bias, T *output, T eps, cudaStream_t stream) { int block = num / hidden; if (hidden <= 32) { const int threads = 32; SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>( num, hidden, input1, input2, output, scale, bias, eps); } else if (hidden <= 128) { const int threads = 128; SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>( num, hidden, input1, input2, output, scale, bias, eps); } else if (hidden == 384) { const int threads = 384; SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>( num, hidden, input1, input2, output, scale, bias, eps); } else { const int threads = 256; SkipLayerNormKernel<T, threads><<<block, threads, 0, stream>>>( num, hidden, input1, input2, output, scale, bias, eps); } } template class SkipLayerNormFunctor<float>; #ifdef SUPPORTS_CUDA_FP16 template class SkipLayerNormFunctor<half>; #endif } // namespace math } // namespace operators } // namespace paddle
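The skip-layernorm kernels above accumulate per-row pairs (val/hidden, val*val/hidden) with a cub BlockReduce, so after the reduction mu = E[val] and rsigma = 1/sqrt(E[val^2] - mu^2 + eps). The following is a rough host-side reference for one row, not taken from the Paddle source; it assumes contiguous float rows of length hidden, and the function name skip_layer_norm_row is illustrative only.

#include <cmath>

// Rough CPU reference for one row of SkipLayerNormKernel (illustrative only):
// val = input1 + input2 is written to output, then normalized as
// out = scale * (val - mu) * rsigma + bias, with rsigma = 1/sqrt(E[val^2] - mu^2 + eps).
void skip_layer_norm_row(int hidden, const float* input1, const float* input2,
                         const float* scale, const float* bias, float* output, float eps) {
  float mean = 0.f, sq_mean = 0.f;
  for (int i = 0; i < hidden; ++i) {
    const float v = input1[i] + input2[i];
    output[i] = v;                       // the kernel also stores the residual sum
    mean += v / hidden;
    sq_mean += v * v / hidden;
  }
  const float rsigma = 1.f / std::sqrt(sq_mean - mean * mean + eps);
  for (int i = 0; i < hidden; ++i)
    output[i] = scale[i] * (output[i] - mean) * rsigma + bias[i];
}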
76254469f5afb2b0763d7d7c60f4f6b86c6efc20.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"  // CUDA runtime API
#include "device_launch_parameters.h"
#include <stdio.h>

hipError_t addWithCuda(int *c, const int *a, size_t size);

__global__ void addKernel( int *c, const int *a )
{
    int i = threadIdx.x;  // this is the thread-parallel code
    extern __shared__ int smem[];
    smem[i] = a[i];
    __syncthreads();
    if (i == 0)  // thread 0 computes the sum of squares
    {
        c[0] = 0;
        for (int d = 0; d < 5; d++)
        {
            c[0] += smem[d] * smem[d];
        }
    }
    if (i == 1)  // thread 1 computes the sum
    {
        c[1] = 0;
        for (int d = 0; d < 5; d++)
        {
            c[1] += smem[d];
        }
    }
    if (i == 2)  // thread 2 computes the product
    {
        c[2] = 1;
        for (int d = 0; d < 5; d++)
        {
            c[2] *= smem[d];
        }
    }
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    int c[arraySize] = { 0 };
    hipError_t cudaStatus = addWithCuda(c, a, arraySize);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("\t1+2+3+4+5 = %d\n\n\t1^2+2^2+3^2+4^2+5^2 = %d\n\n\t1*2*3*4*5 = %d\n\n\n\n\n\n\n", c[1], c[0], c[2]);
    cudaStatus = hipDeviceReset();
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipDeviceReset failed!");
        return 1;
    }
    return 0;
}

// Helper function for using CUDA to add vectors in parallel.
// Focus on understanding this function.
//hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
hipError_t addWithCuda(int *c, const int *a, size_t size)
{
    int *dev_a = 0;  // GPU device-side data pointers
    int *dev_c = 0;
    hipError_t cudaStatus;  // status indicator

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = hipSetDevice(0);  // select the platform to run on
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    // Allocate GPU device-side memory.
    cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }
    cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    // Copy data to the GPU.
    cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

    addKernel << <1, size, size*sizeof(int), 0 >> >(dev_c, dev_a);

    cudaStatus = hipDeviceSynchronize();
    if (cudaStatus != hipSuccess){
        fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    // Copy the result back to the host.
    cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
    if (cudaStatus != hipSuccess) {
        fprintf(stderr, "hipMemcpy failed!");
        goto Error;
    }

Error:
    hipFree(dev_c);  // free GPU device-side memory
    hipFree(dev_a);

    return cudaStatus;
}
76254469f5afb2b0763d7d7c60f4f6b86c6efc20.cu
#include "cuda_runtime.h" // cuda运行时API #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, size_t size); __global__ void addKernel( int *c, const int *a ) { int i = threadIdx.x; // 这是线程并行的代码 extern __shared__ int smem[]; smem[i]=a[i]; __syncthreads(); if (i == 0) // 0号线程做平方和 { c[0] = 0; for (int d = 0; d < 5; d++) { c[0] += smem[d] * smem[d]; } } if (i == 1) // 1号线程做累加 { c[1] = 0; for (int d = 0; d < 5; d++) { c[1] += smem[d]; } } if (i == 2) // 2号线程做累乘 { c[2] = 1; for (int d = 0; d < 5; d++) { c[2] *= smem[d]; } } } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; int c[arraySize] = { 0 }; cudaError_t cudaStatus = addWithCuda(c, a, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("\t1+2+3+4+5 = %d\n\n\t1^2+2^2+3^2+4^2+5^2 = %d\n\n\t1*2*3*4*5 = %d\n\n\n\n\n\n\n",c[1],c[0],c[2]); cudaStatus = cudaThreadExit(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaThreadExit failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. // 重点理解这个函数 //cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) cudaError_t addWithCuda(int *c, const int *a, size_t size) { int *dev_a = 0; // GPU设备端数据指针 int *dev_c = 0; cudaError_t cudaStatus; // 状态指示 // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); // 选择运行平台 if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . // 分配GPU设备端内存 cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. // 拷贝数据到GPU cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } addKernel << <1, size, size*sizeof(int), 0 >> >(dev_c, dev_a); cudaStatus = cudaThreadSynchronize(); if (cudaStatus != cudaSuccess){ fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. // 拷贝结构回主机. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); // 释放GPU设备端内存 cudaFree(dev_a); return cudaStatus; }
59a226118c40710508193054d25c22560736389f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "neural_net.h" #include <time.h> #include <cstdio> #include <string> template <typename T> __global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } template <typename T> __global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; loss[i] = -logf(O[i * num_classes + y[i]] + eps); } template <typename T> __global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; T max = O[i * num_classes]; int index = 0; for (int j = 1; j < num_classes; j++) { if (O[i * num_classes + j] > max) { max = O[i * num_classes + j]; index = j; } } pred_y[i] = index; } float NeuralNet::computeLoss() { if (layer_type[num_layers - 1] == SOFTMAX) { if (data_type == CUDNN_DATA_FLOAT) hipLaunchKernelGGL(( computeSoftmaxLoss<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); else if (data_type == CUDNN_DATA_DOUBLE) hipLaunchKernelGGL(( computeSoftmaxLoss<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); } checkCudaErrors(hipMemcpy(h_loss, loss, batch_size * sizeof(float), hipMemcpyDeviceToHost)); float total_loss = 0.0; for (int i = 0; i < batch_size; i++) total_loss += h_loss[i]; return total_loss / batch_size; } void NeuralNet::compareOutputCorrect(int *correct_count, int *y) { *correct_count = 0; if (data_type == CUDNN_DATA_FLOAT) { float *typecast_O = (float *)layer_input[num_layers - 1]; hipLaunchKernelGGL(( inferClass<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } else if (data_type == CUDNN_DATA_DOUBLE) { double *typecast_O = (double *)layer_input[num_layers - 1]; hipLaunchKernelGGL(( inferClass<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(hipMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), hipMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } } NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format, long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) { // ---------------------- vDNN start ---------------------- checkCudaErrors(hipStreamCreate(&stream_compute)); checkCudaErrors(hipStreamCreate(&stream_memory)); this->vdnn_type = vdnn_type; this->vdnn_conv_algo = vdnn_conv_algo; // ---------------------- vDNN end ------------------------ // create handle checkCUDNN(cudnnCreate(&cudnn_handle)); checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute)); checkCUBLAS(hipblasCreate(&cublas_handle)); 
checkCUBLAS(hipblasSetStream(cublas_handle, stream_compute)); checkCURAND(hiprandCreateGenerator(&curand_gen, HIPRAND_RNG_PSEUDO_DEFAULT)); checkCURAND(hiprandSetStream(curand_gen, stream_compute)); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); init_free_bytes = free_bytes; std::cout << "Free bytes at start: " << free_bytes << std::endl; pre_alloc_conv_derivative = false; pre_alloc_fc_derivative = false; pre_alloc_batch_norm_derivative = true; if (vdnn_type == vDNN_NONE) { pre_alloc_conv_derivative = true; pre_alloc_fc_derivative = true; pre_alloc_batch_norm_derivative = true; } if (data_type == DATA_FLOAT) { this->data_type = CUDNN_DATA_FLOAT; data_type_size = sizeof(float); } else if (data_type == DATA_DOUBLE) { this->data_type = CUDNN_DATA_DOUBLE; data_type_size = sizeof(double); } if (tensor_format == TENSOR_NCHW) this->tensor_format = CUDNN_TENSOR_NCHW; else if (tensor_format == TENSOR_NHWC) this->tensor_format = CUDNN_TENSOR_NHWC; this->batch_size = batch_size; this->softmax_eps = softmax_eps; this->init_std_dev = init_std_dev; num_layers = layers.size(); // allocation of space for input to each layer layer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int)); dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); params = (void **)malloc(num_layers * sizeof(void *)); LayerDimension prev_output_size; LayerDimension current_output_size; for (int i = 0; i < num_layers; i++) { layer_type.push_back(layers[i].type); if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; params[i] = malloc(sizeof(ConvLayerParams)); ((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, data_type_size, current_output_size, update_rule); } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; params[i] = malloc(sizeof(FCLayerParams)); ((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type, current_output_size, update_rule); } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; params[i] = malloc(sizeof(DropoutLayerParams)); ((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, current_output_size); } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size, update_rule); } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; params[i] = malloc(sizeof(ActivationLayerParams)); ((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; params[i] = malloc(sizeof(SoftmaxLayerParams)); 
((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); // std::cout << current_output_size.N << ' ' << current_output_size.C << current_output_size.H << current_output_size.W << std::endl; } if (i == 0) { prev_output_size = current_output_size; } // incomplete - have to check flatten and check exact dimension // else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) { // std::cout << "Layer " << i << " output and next layer's input size mismatch\n"; // exit(0); // } } // ---------------------- vDNN start ---------------------- // allocate space in host memory for layers to be transferred h_layer_input = (void **)malloc(num_layers * sizeof(void *)); to_offload = (bool *)malloc(num_layers * sizeof(bool)); prefetched = (bool *)malloc(num_layers * sizeof(bool)); // ---------------------- vDNN end ------------------------ checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl; // allocate space for parameters // Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input for (int i = 0; i < num_layers; i++) { size_t input_size; if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; ((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_conv_derivative); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; ((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_fc_derivative); input_size = batch_size * user_params->input_channels; if (i == 0) { input_channels = user_params->input_channels; input_h = 1; input_w = 1; } } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; ((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; ((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size, free_bytes, pre_alloc_batch_norm_derivative); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; ((PoolingLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; ((ActivationLayerParams 
*)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; ((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; // assuming this is last layer, allocate for next layer as well // checkCudaErrors(hipMalloc(&layer_input[i + 1], input_size * data_type_size)); // checkCudaErrors(hipMalloc(&dlayer_input[i + 1], input_size * data_type_size)); layer_input_size[i + 1] = input_size; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } if (i == num_layers - 1) { num_classes = user_params->channels; } } // do not allocate memory initially // checkCudaErrors(hipMalloc(&layer_input[i], input_size * data_type_size)); // checkCudaErrors(hipMalloc(&dlayer_input[i], input_size * data_type_size)); // ---------------------- vDNN start ---------------------- layer_input_size[i] = input_size; // ---------------------- vDNN end ------------------------ } checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl; // very small - could be allocated initially itself checkCudaErrors(hipMalloc((void **)&y, batch_size * sizeof(int))); checkCudaErrors(hipMalloc((void **)&pred_y, batch_size * sizeof(int))); checkCudaErrors(hipMalloc((void **)&loss, batch_size * sizeof(float))); checkCudaErrors(hipMalloc(&one_vec, batch_size * data_type_size)); if (this->data_type == CUDNN_DATA_FLOAT) hipLaunchKernelGGL(( fillValue<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (float *)one_vec, batch_size, 1); else hipLaunchKernelGGL(( fillValue<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, (double *)one_vec, batch_size, 1); checkCudaErrors(hipHostMalloc((void **)&h_loss, batch_size * sizeof(float))); checkCudaErrors(hipHostMalloc((void **)&h_pred_y, batch_size * sizeof(int))); // do not allocate workspace initially // allocate space for workspace and also keep track of algo // size_t cur_workspace_size; // workspace_size = 0; // for (int i = 0; i < num_layers; i++) { // if (layers[i].type == CONV) { // ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes); // if (cur_workspace_size > workspace_size) // workspace_size = cur_workspace_size; // } // } // checkCudaErrors(hipMalloc(&workspace, workspace_size)); // free_bytes = free_bytes - workspace_size; checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemGetInfo(&free_bytes, &total_bytes)); // leave 600 MB and use the rest std::cout << "Free bytes: " << free_bytes << std::endl; free_bytes -= 1024 * 1024 * 600; // ---------------------- vDNN start ---------------------- size_t exp_max_consume, max_consume; vDNNOptimize(exp_max_consume, max_consume); std::cout << "actual_max_consume: " << max_consume << std::endl; std::cout << "exp_max_consume: " << exp_max_consume << std::endl; std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_free_bytes(MB): " << (free_bytes + 1024 * 1024 * 600 - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes 
+ 600 * 1024 * 1024 - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl; std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - max_consume)) / (1.0 * 1024 * 1024) << std::endl; // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- free_bytes = max_consume; cnmemDevice_t cnmem_device; size_t cnmem_stream_memory_size = free_bytes; cnmem_device.device = 0; cnmem_device.size = cnmem_stream_memory_size; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; // do not allow call to hipMalloc checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- for (int i = 0; i < num_layers; i++) { std::cerr << "to_offload[i] " << to_offload[i] << std::endl; } for (int i = 0; i < num_layers; i++) { // allocate pinned memory in host if (to_offload[i]) checkCudaErrors(hipHostMalloc(&h_layer_input[i], layer_input_size[i] * data_type_size)); } // ---------------------- vDNN end ------------------------ checkCudaErrors(hipDeviceSynchronize()); size_t temp_free_bytes; checkCudaErrors(hipMemGetInfo(&temp_free_bytes, &total_bytes)); std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl; // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } // data of time checkCudaErrors(hipEventCreate(&start_compute)); checkCudaErrors(hipEventCreate(&stop_compute)); checkCudaErrors(hipEventCreate(&start_transfer)); checkCudaErrors(hipEventCreate(&stop_transfer)); } bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) { CnmemSpace space_tracker(free_bytes); max_consume = 0; // forward pass // allocate space for 1st input std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl; std::cerr << "Forward pass" << std::endl; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) break; std::cerr << "Processing layer " << i << std::endl; std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; size_t cur_workspace_size; checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size)); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); space_tracker.updateMaxConsume(max_consume); if (!space_tracker.isAvailable()) return false; std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; // deallocate layer input if (to_offload[i]) { std::cerr << 
"deallocating input to " << i << std::endl; space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } std::cerr << "Backward pass" << std::endl; if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) { std::cout << "Panic!! Using wrong size\n"; exit(0); } // backward pass space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); // std::cerr << "max_consume: " << max_consume << std::endl; for (int i = num_layers - 1; i >= 0; i--) { // allocate space for previous layer derivative std::cerr << "Processing layer " << i << std::endl; std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl; if (i > 0) { if (layer_type[i] == SOFTMAX) continue; else { space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } // std::cerr << "max_consume: " << max_consume << std::endl; } int layer_to_prefetch = findPrefetchLayer(i); // if layer to be prefetched, allocate space for that layer if (layer_to_prefetch != -1) { std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; size_t cur_filter_workspace_size; checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size)); size_t cur_data_workspace_size = 0; if (i > 0) checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size)); size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size :cur_data_workspace_size; space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } // std::cerr << "max_consume: " << max_consume << std::endl; if (!space_tracker.isAvailable()) return false; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl; if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } if (!space_tracker.isAvailable()) return false; // deallocate layer output and derivative space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl; // if 1st layer, deallocate input layer also if (i == 0) { space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << 
space_tracker.getConsumed() << std::endl; } } if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n"; if (space_tracker.getConsumed() != 0) std::cerr << "Panic!! bytes not freed properly\n"; // return true; exp_max_consume = max_consume; // check with cnmem once bool ret_val = simulateCNMEMMemory(max_consume); return ret_val; } bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) { size_t init_max_consume = max_consume; cnmemDevice_t cnmem_device; size_t t; checkCudaErrors(hipMemGetInfo(&free_bytes, &t)); std::cout << "free_bytes: " << free_bytes << std::endl; free_bytes -= 100 * 1024 * 1024; cnmem_device.device = 0; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; std::string cnmem_memory_state_filename; if (vdnn_type == vDNN_ALL) { if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { cnmem_memory_state_filename = "cnmem_all_p.dat"; } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { cnmem_memory_state_filename = "cnmem_all_m.dat"; } } else if (vdnn_type == vDNN_CONV) { if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { cnmem_memory_state_filename = "cnmem_conv_p.dat"; } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { cnmem_memory_state_filename = "cnmem_conv_m.dat"; } } else if (vdnn_type == vDNN_DYN) { cnmem_memory_state_filename = "cnmem_dyn.dat"; } else { cnmem_memory_state_filename = "cnmem_unknown.dat"; } FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w"); size_t run_count = 0; bool out_of_memory = false; while (true) { run_count++; if (max_consume >= free_bytes) break; out_of_memory = false; cnmem_device.size = max_consume; std::cerr << run_count << ' ' << max_consume << std::endl; if (max_consume > free_bytes) std::cerr << "panic!! max_consume > free_bytes\n"; checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); resetPrefetched(); fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n"); fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count); fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume); fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n"); fprintf(cnmem_memory_state_fptr, "initial state\n"); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL), layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. 
layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (layer_type[i] == CONV) { checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (to_offload[i]) { checkCNMEMSim(cnmemFree(layer_input[i], NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } } if (out_of_memory) { checkCNMEM(cnmemFinalize()); if (max_consume < free_bytes) continue; else break; } checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL), layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL), layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. 
db - size: %lu\n", cur_params->C_out * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; continue; } if (layer_type[i] == CONV) { checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free conv. 
workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (i == 0) { checkCNMEMSim(cnmemFree(layer_input[i], NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } checkCNMEM(cnmemFinalize()); if (out_of_memory) { if (max_consume < free_bytes) continue; else break; } break; } free_bytes += 100 * 1024 * 1024; if (max_consume < free_bytes) { double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024); double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024); fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size); fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size); } else { fprintf(cnmem_memory_state_fptr, "out of memory\n"); } fclose(cnmem_memory_state_fptr); if (max_consume < free_bytes) return true; else return false; } void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) { bool hard = true, soft = false; // if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available if (vdnn_type == vDNN_ALL) { setOffload(OFFLOAD_ALL); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_CONV) { setOffload(OFFLOAD_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) 
outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_NONE) { setOffload(OFFLOAD_NONE); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_ALTERNATE_CONV) { setOffload(OFFLOAD_ALTERNATE_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } if (vdnn_type == vDNN_DYN) { // check for trainability std::cerr << "vDNN_DYN\n"; setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); // check if work with fastest algo and no offload, if so, select it and return setOffload(NeuralNet::OFFLOAD_NONE); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n"; return; } // check if conv offload and fastest algo works, then check if all offload and fastest algo works setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n"; return; } // optimize using greedy algo memory usage while improving performance setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, ALL OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n"; return; } } exit(0); } void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) { if (offload_type == OFFLOAD_NONE) { for (int i = 0; i < num_layers; i++) to_offload[i] = false; } else if (offload_type == OFFLOAD_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if 
(offload_type == OFFLOAD_ALL) { for (int i = 0; i < num_layers; i++) { // Only SOFTMAX, CONV, POOL, FULLY_CONNECTED used so far if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX or layer_type[i] == FULLY_CONNECTED) to_offload[i] = false; else to_offload[i] = true; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if (offload_type == OFFLOAD_ALTERNATE_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } bool toggle = true; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) { if (toggle == false) to_offload[i] = false; toggle = !toggle; } } } } void NeuralNet::resetPrefetched() { for (int i = 0; i < num_layers; i++) prefetched[i] = false; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) { std::vector<float> t1, t2; this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss); } void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) { CnmemSpace space_tracker(free_bytes); // std::cout << "here\n"; // std::cout << "Free bytes: " << free_bytes << std::endl; for (int i = 0; i < num_layers; i++) prefetched[i] = false; checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); checkCudaErrors(hipMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, hipMemcpyHostToDevice)); if (train == true) { checkCudaErrors(hipMemcpy(this->y, y, batch_size * data_type_size, hipMemcpyHostToDevice)); } float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { if (train == false && i == num_layers - 1) break; // ---------------------- vDNN start ---------------------- size_t cur_workspace_size; void *cur_workspace; // offload if required if (i > 0 && to_offload[i] && train == true) checkCudaErrors(hipMemcpyAsync(h_layer_input[i], layer_input[i], layer_input_size[i] * data_type_size, hipMemcpyDeviceToHost, stream_memory)); checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ // std::cout << "here" << i << std::endl; if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, 
cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } // std::cout << "FChere" << i << std::endl; } else if (layer_type[i] == DROPOUT) { // std::cout << "Dropout\n"; DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { // std::cout << "Batchnorm\n"; BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (train == true) { checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else { checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon)); } } else if (layer_type[i] == POOLING) { // std::cout << "Pooling\n"; PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, 
cur_params->output_tensor, layer_input[i + 1])); } else if (layer_type[i] == ACTV) { // std::cout << "Actv\n"; std::cout << "Panic!! ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); if (train == true) { SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(hipDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; if (train == true) { layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } i--; } struct timespec start_time, end_time; checkCudaErrors(hipStreamSynchronize(stream_compute)); if (train) clock_gettime(CLOCK_MONOTONIC, &start_time); checkCudaErrors(hipStreamSynchronize(stream_memory)); if (train) { clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; fwd_vdnn_lag.push_back(lag); } // std::cout << "EndSynchere" << i << std::endl; if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); } if (to_offload[i] && train == true) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (train == false) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // std::cout << "EndSynchere" << i << std::endl; // ---------------------- vDNN end ------------------------ } // std::cout << "here" << std::endl; if (train == false) { compareOutputCorrect(correct_count, y); checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size); return; } *scalar_loss = computeLoss(); // ---------------------- vDNN start ---------------------- checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ if (layer_type[num_layers - 1] == SOFTMAX) { // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; if (data_type == CUDNN_DATA_FLOAT) { checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); hipLaunchKernelGGL(( softmaxLossBackProp<float>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, 
this->y, (float *)layer_input[num_layers], (float *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCudaErrors(hipMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); hipLaunchKernelGGL(( softmaxLossBackProp<double>), dim3(ceil(1.0 * batch_size / BW)), dim3(BW), 0, 0, this->y, (double *)layer_input[num_layers], (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; if (layer_to_prefetch != 0) { checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory)); } else { // std::cout << "transfer here\n"; checkCudaErrors(hipMemcpyAsync(layer_input[layer_to_prefetch], X, layer_input_size[layer_to_prefetch] * data_type_size, hipMemcpyHostToDevice, stream_memory)); // std::cout << "transfer here\n"; } } checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); } // std::cout << "Free bytes: " << free_bytes << std::endl; } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasSgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)one_vec, batch_size, &Dbeta, (double *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(hipblasDgemm(cublas_handle, HIPBLAS_OP_T, HIPBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 
1], cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); } checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(hipDeviceSynchronize()); struct timespec start_time, end_time; checkCudaErrors(hipStreamSynchronize(stream_compute)); if (train) clock_gettime(CLOCK_MONOTONIC, &start_time); checkCudaErrors(hipStreamSynchronize(stream_memory)); if (train) { clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); } if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = 
(FCLayerParams *)params[i];
				cur_params->cnmemFreeDerivatives(NULL);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size);
			}
		}
		else if (layer_type[i] == BATCHNORM) {
			if (train == true and !pre_alloc_batch_norm_derivative) {
				BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i];
				cur_params->cnmemFreeDerivatives(NULL);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
				space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size);
			}
		}
		checkCNMEM(cnmemFree(layer_input[i + 1], NULL));
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL));
		space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size);
		if (i == 0) {
			checkCNMEM(cnmemFree(layer_input[i], NULL));
			space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size);
		}
		// ---------------------- vDNN end ------------------------
	}

	if (space_tracker.getConsumed() != 0) {
		std::cout << "Panic!! Space not updated properly\n";
	}
	// exit(0);
}

int NeuralNet::findPrefetchLayer(int cur_layer) {
	for (int i = cur_layer - 1; i >= 0; i--) {
		if (to_offload[i] && !prefetched[i]) {
			prefetched[i] = true;
			return i;
		}
		else if (layer_type[i] == CONV) {
			return -1;
		}
	}
	return -1;
}
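// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// prefetch rule implemented by NeuralNet::findPrefetchLayer above, reduced to
// a standalone function over plain vectors so it can be exercised in
// isolation. The names here (LType, pickPrefetchLayer, the toy layer list in
// demo) are hypothetical; only the selection rule mirrors the member
// function: walk backwards from the current layer, return the nearest layer
// that is marked for offload but not yet prefetched, and stop as soon as a
// CONV layer is crossed.
#include <cstdio>
#include <vector>

namespace prefetch_sketch {

enum class LType { CONV, POOL, FC, ACTV, SOFTMAX };

inline int pickPrefetchLayer(int cur_layer, const std::vector<LType> &layer_type,
                             const std::vector<bool> &to_offload,
                             std::vector<bool> &prefetched) {
	for (int i = cur_layer - 1; i >= 0; i--) {
		if (to_offload[i] && !prefetched[i]) {
			prefetched[i] = true;  // fetch each offloaded input at most once
			return i;
		}
		else if (layer_type[i] == LType::CONV) {
			return -1;             // do not reach past the previous conv layer
		}
	}
	return -1;
}

// Tiny usage example: conv-pool-conv-fc-softmax, with both convs offloaded.
inline void demo() {
	std::vector<LType> layer_type = {LType::CONV, LType::POOL, LType::CONV,
	                                 LType::FC, LType::SOFTMAX};
	std::vector<bool> to_offload = {true, false, true, false, false};
	std::vector<bool> prefetched(layer_type.size(), false);
	// While backpropagating through layer 4, layer 2's input gets prefetched.
	std::printf("prefetch for layer 4: %d\n",
	            pickPrefetchLayer(4, layer_type, to_offload, prefetched));
}

}  // namespace prefetch_sketch
// ---------------------------------------------------------------------------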
59a226118c40710508193054d25c22560736389f.cu
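// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original sources): the
// retry pattern used by NeuralNet::simulateCNMEMMemory in this file, reduced
// to a plain loop. growPoolUntilFit and try_run are hypothetical names; in
// the real code, checkCNMEMSim appears to raise max_consume whenever a
// cnmemMalloc fails, the pool is torn down with cnmemFinalize, and the whole
// simulated forward+backward pass is replayed until it fits or the requested
// pool would exceed the physically free bytes.
#include <cstddef>
#include <functional>

namespace cnmem_sketch {

// try_run simulates one full pass against a pool of size pool_size; it may
// enlarge pool_size before returning false (out of memory).
inline bool growPoolUntilFit(std::size_t &pool_size, std::size_t free_bytes,
                             const std::function<bool(std::size_t &)> &try_run) {
	while (pool_size < free_bytes) {
		if (try_run(pool_size))
			return true;   // the pass completed inside the current pool size
	}
	return false;          // even the whole free memory would not be enough
}

}  // namespace cnmem_sketch
// ---------------------------------------------------------------------------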
#include "neural_net.h" #include <time.h> #include <cstdio> #include <string> template <typename T> __global__ void softmaxLossBackProp(int *y, T *SO, T *dSO, int batch_size, int output_size, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; int cur_class = static_cast<int>(y[i]); dSO[i * output_size + cur_class] = -1 / (SO[i * output_size + cur_class] * batch_size + eps); } template <typename T> __global__ void computeSoftmaxLoss(T *O, int *y, float *loss, int batch_size, int num_classes, float eps) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; loss[i] = -logf(O[i * num_classes + y[i]] + eps); } template <typename T> __global__ void inferClass(T *O, int *pred_y, int batch_size, int num_classes) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size) return; T max = O[i * num_classes]; int index = 0; for (int j = 1; j < num_classes; j++) { if (O[i * num_classes + j] > max) { max = O[i * num_classes + j]; index = j; } } pred_y[i] = index; } float NeuralNet::computeLoss() { if (layer_type[num_layers - 1] == SOFTMAX) { if (data_type == CUDNN_DATA_FLOAT) computeSoftmaxLoss<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); else if (data_type == CUDNN_DATA_DOUBLE) computeSoftmaxLoss<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)layer_input[num_layers], this->y, loss, batch_size, num_classes, softmax_eps); } checkCudaErrors(cudaMemcpy(h_loss, loss, batch_size * sizeof(float), cudaMemcpyDeviceToHost)); float total_loss = 0.0; for (int i = 0; i < batch_size; i++) total_loss += h_loss[i]; return total_loss / batch_size; } void NeuralNet::compareOutputCorrect(int *correct_count, int *y) { *correct_count = 0; if (data_type == CUDNN_DATA_FLOAT) { float *typecast_O = (float *)layer_input[num_layers - 1]; inferClass<float><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } else if (data_type == CUDNN_DATA_DOUBLE) { double *typecast_O = (double *)layer_input[num_layers - 1]; inferClass<double><<<ceil(1.0 * batch_size / BW), BW>>>(typecast_O, pred_y, batch_size, num_classes); checkCudaErrors(cudaMemcpy(h_pred_y, pred_y, batch_size * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 0; i < batch_size; i++) { if (h_pred_y[i] == y[i]) *correct_count = *correct_count + 1; } } } NeuralNet::NeuralNet(std::vector<LayerSpecifier> &layers, DataType data_type, int batch_size, TensorFormat tensor_format, long long dropout_seed, float softmax_eps, float init_std_dev, vDNNType vdnn_type, vDNNConvAlgo vdnn_conv_algo, UpdateRule update_rule) { // ---------------------- vDNN start ---------------------- checkCudaErrors(cudaStreamCreate(&stream_compute)); checkCudaErrors(cudaStreamCreate(&stream_memory)); this->vdnn_type = vdnn_type; this->vdnn_conv_algo = vdnn_conv_algo; // ---------------------- vDNN end ------------------------ // create handle checkCUDNN(cudnnCreate(&cudnn_handle)); checkCUDNN(cudnnSetStream(cudnn_handle, stream_compute)); checkCUBLAS(cublasCreate(&cublas_handle)); checkCUBLAS(cublasSetStream(cublas_handle, stream_compute)); checkCURAND(curandCreateGenerator(&curand_gen, CURAND_RNG_PSEUDO_DEFAULT)); checkCURAND(curandSetStream(curand_gen, stream_compute)); 
checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); init_free_bytes = free_bytes; std::cout << "Free bytes at start: " << free_bytes << std::endl; pre_alloc_conv_derivative = false; pre_alloc_fc_derivative = false; pre_alloc_batch_norm_derivative = true; if (vdnn_type == vDNN_NONE) { pre_alloc_conv_derivative = true; pre_alloc_fc_derivative = true; pre_alloc_batch_norm_derivative = true; } if (data_type == DATA_FLOAT) { this->data_type = CUDNN_DATA_FLOAT; data_type_size = sizeof(float); } else if (data_type == DATA_DOUBLE) { this->data_type = CUDNN_DATA_DOUBLE; data_type_size = sizeof(double); } if (tensor_format == TENSOR_NCHW) this->tensor_format = CUDNN_TENSOR_NCHW; else if (tensor_format == TENSOR_NHWC) this->tensor_format = CUDNN_TENSOR_NHWC; this->batch_size = batch_size; this->softmax_eps = softmax_eps; this->init_std_dev = init_std_dev; num_layers = layers.size(); // allocation of space for input to each layer layer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); layer_input_size = (int *)malloc((num_layers + 1) * sizeof(int)); dlayer_input = (void **)malloc((num_layers + 1) * sizeof(void *)); params = (void **)malloc(num_layers * sizeof(void *)); LayerDimension prev_output_size; LayerDimension current_output_size; for (int i = 0; i < num_layers; i++) { layer_type.push_back(layers[i].type); if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; params[i] = malloc(sizeof(ConvLayerParams)); ((ConvLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, data_type_size, current_output_size, update_rule); } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; params[i] = malloc(sizeof(FCLayerParams)); ((FCLayerParams *)params[i])->initializeValues(user_params, batch_size, this->tensor_format, this->data_type, current_output_size, update_rule); } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; params[i] = malloc(sizeof(DropoutLayerParams)); ((DropoutLayerParams *)params[i])->initializeValues(cudnn_handle, user_params, this->data_type, batch_size, this->tensor_format, current_output_size); } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((BatchNormLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size, update_rule); } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; params[i] = malloc(sizeof(BatchNormLayerParams)); ((PoolingLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; params[i] = malloc(sizeof(ActivationLayerParams)); ((ActivationLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; params[i] = malloc(sizeof(SoftmaxLayerParams)); ((SoftmaxLayerParams *)params[i])->initializeValues(user_params, this->data_type, this->tensor_format, batch_size, current_output_size); // std::cout << current_output_size.N << ' ' << 
current_output_size.C << current_output_size.H << current_output_size.W << std::endl; } if (i == 0) { prev_output_size = current_output_size; } // incomplete - have to check flatten and check exact dimension // else if (current_output_size.getTotalSize() != prev_output_size.getTotalSize()) { // std::cout << "Layer " << i << " output and next layer's input size mismatch\n"; // exit(0); // } } // ---------------------- vDNN start ---------------------- // allocate space in host memory for layers to be transferred h_layer_input = (void **)malloc(num_layers * sizeof(void *)); to_offload = (bool *)malloc(num_layers * sizeof(bool)); prefetched = (bool *)malloc(num_layers * sizeof(bool)); // ---------------------- vDNN end ------------------------ checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just before allocate space: " << free_bytes << std::endl; // allocate space for parameters // Exception BatchNorm - looks like it will take lots of space if only FC layers - space taken = size of one input for (int i = 0; i < num_layers; i++) { size_t input_size; if (layers[i].type == CONV) { ConvDescriptor *user_params = (ConvDescriptor *)layers[i].params; ((ConvLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_conv_derivative); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == FULLY_CONNECTED) { FCDescriptor *user_params = (FCDescriptor *)layers[i].params; ((FCLayerParams *)params[i])->allocateSpace(curand_gen, this->data_type, data_type_size, init_std_dev, free_bytes, pre_alloc_fc_derivative); input_size = batch_size * user_params->input_channels; if (i == 0) { input_channels = user_params->input_channels; input_h = 1; input_w = 1; } } else if (layers[i].type == DROPOUT) { DropoutDescriptor *user_params = (DropoutDescriptor *)layers[i].params; ((DropoutLayerParams *)params[i])->allocateSpace(free_bytes, cudnn_handle, user_params, dropout_seed); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == BATCHNORM) { BatchNormDescriptor *user_params = (BatchNormDescriptor *)layers[i].params; ((BatchNormLayerParams *)params[i])->allocateSpace(this->data_type, data_type_size, free_bytes, pre_alloc_batch_norm_derivative); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } } else if (layers[i].type == POOLING) { PoolingDescriptor *user_params = (PoolingDescriptor *)layers[i].params; ((PoolingLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->input_channels * user_params->input_h * user_params->input_w; if (i == 0) { input_channels = user_params->input_channels; input_h = user_params->input_h; input_w = user_params->input_w; } } else if (layers[i].type == ACTV) { ActivationDescriptor *user_params = (ActivationDescriptor *)layers[i].params; ((ActivationLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; 
input_w = user_params->w; } } else if (layers[i].type == SOFTMAX) { SoftmaxDescriptor *user_params = (SoftmaxDescriptor *)layers[i].params; ((SoftmaxLayerParams *)params[i])->allocateSpace(free_bytes); input_size = batch_size * user_params->channels * user_params->h * user_params->w; // assuming this is last layer, allocate for next layer as well // checkCudaErrors(cudaMalloc(&layer_input[i + 1], input_size * data_type_size)); // checkCudaErrors(cudaMalloc(&dlayer_input[i + 1], input_size * data_type_size)); layer_input_size[i + 1] = input_size; if (i == 0) { input_channels = user_params->channels; input_h = user_params->h; input_w = user_params->w; } if (i == num_layers - 1) { num_classes = user_params->channels; } } // do not allocate memory initially // checkCudaErrors(cudaMalloc(&layer_input[i], input_size * data_type_size)); // checkCudaErrors(cudaMalloc(&dlayer_input[i], input_size * data_type_size)); // ---------------------- vDNN start ---------------------- layer_input_size[i] = input_size; // ---------------------- vDNN end ------------------------ } checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); std::cout << "Free bytes just after allocate space: " << free_bytes << std::endl; // very small - could be allocated initially itself checkCudaErrors(cudaMalloc((void **)&y, batch_size * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&pred_y, batch_size * sizeof(int))); checkCudaErrors(cudaMalloc((void **)&loss, batch_size * sizeof(float))); checkCudaErrors(cudaMalloc(&one_vec, batch_size * data_type_size)); if (this->data_type == CUDNN_DATA_FLOAT) fillValue<float><<<ceil(1.0 * batch_size / BW), BW>>>((float *)one_vec, batch_size, 1); else fillValue<double><<<ceil(1.0 * batch_size / BW), BW>>>((double *)one_vec, batch_size, 1); checkCudaErrors(cudaMallocHost((void **)&h_loss, batch_size * sizeof(float))); checkCudaErrors(cudaMallocHost((void **)&h_pred_y, batch_size * sizeof(int))); // do not allocate workspace initially // allocate space for workspace and also keep track of algo // size_t cur_workspace_size; // workspace_size = 0; // for (int i = 0; i < num_layers; i++) { // if (layers[i].type == CONV) { // ((ConvLayerParams *)params[i])->getWorkspaceSize(cur_workspace_size, free_bytes); // if (cur_workspace_size > workspace_size) // workspace_size = cur_workspace_size; // } // } // checkCudaErrors(cudaMalloc(&workspace, workspace_size)); // free_bytes = free_bytes - workspace_size; checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemGetInfo(&free_bytes, &total_bytes)); // leave 600 MB and use the rest std::cout << "Free bytes: " << free_bytes << std::endl; free_bytes -= 1024 * 1024 * 600; // ---------------------- vDNN start ---------------------- size_t exp_max_consume, max_consume; vDNNOptimize(exp_max_consume, max_consume); std::cout << "actual_max_consume: " << max_consume << std::endl; std::cout << "exp_max_consume: " << exp_max_consume << std::endl; std::cout << "diff_max_consume(MB): " << (max_consume - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_free_bytes(MB): " << (free_bytes + 1024 * 1024 * 600 - exp_max_consume) / (1.0 * 1024 * 1024) << std::endl; std::cout << "exp_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - exp_max_consume)) / (1.0 * 1024 * 1024) << std::endl; std::cout << "actual_total_consume(MB): " << (init_free_bytes - (free_bytes + 600 * 1024 * 1024 - max_consume)) / (1.0 * 1024 * 1024) << std::endl; // ---------------------- vDNN end 
------------------------ // ---------------------- vDNN start ---------------------- free_bytes = max_consume; cnmemDevice_t cnmem_device; size_t cnmem_stream_memory_size = free_bytes; cnmem_device.device = 0; cnmem_device.size = cnmem_stream_memory_size; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; // do not allow call to cudaMalloc checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); // ---------------------- vDNN end ------------------------ // ---------------------- vDNN start ---------------------- for (int i = 0; i < num_layers; i++) { std::cerr << "to_offload[i] " << to_offload[i] << std::endl; } for (int i = 0; i < num_layers; i++) { // allocate pinned memory in host if (to_offload[i]) checkCudaErrors(cudaMallocHost(&h_layer_input[i], layer_input_size[i] * data_type_size)); } // ---------------------- vDNN end ------------------------ checkCudaErrors(cudaDeviceSynchronize()); size_t temp_free_bytes; checkCudaErrors(cudaMemGetInfo(&temp_free_bytes, &total_bytes)); std::cout << "Free bytes just before end of NeuralNet: " << temp_free_bytes << std::endl; // { // int n; // std::cout << "waiting..\n"; // std::cin >> n; // } // data of time checkCudaErrors(cudaEventCreate(&start_compute)); checkCudaErrors(cudaEventCreate(&stop_compute)); checkCudaErrors(cudaEventCreate(&start_transfer)); checkCudaErrors(cudaEventCreate(&stop_transfer)); } bool NeuralNet::simulateNeuralNetworkMemory(vDNNConvAlgoPref algo_pref, bool hard, size_t &exp_max_consume, size_t &max_consume) { CnmemSpace space_tracker(free_bytes); max_consume = 0; // forward pass // allocate space for 1st input std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating input(MB): " << space_tracker.getConsumed() << std::endl; std::cerr << "Forward pass" << std::endl; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == SOFTMAX) break; std::cerr << "Processing layer " << i << std::endl; std::cerr << "Initial Used space(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after output allocation(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; size_t cur_workspace_size; checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::FWD, algo_pref, hard, cur_workspace_size)); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); space_tracker.updateMaxConsume(max_consume); if (!space_tracker.isAvailable()) return false; std::cerr << "Used space after workspace allocation(MB): " << space_tracker.getConsumed() << std::endl; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after workspace deallocation(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; // deallocate layer input if (to_offload[i]) { std::cerr << "deallocating input to " << i << std::endl; space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << space_tracker.getConsumed() << std::endl; } } std::cerr << "Backward 
pass" << std::endl; if (batch_size * num_classes * data_type_size != layer_input_size[num_layers] * data_type_size) { std::cout << "Panic!! Using wrong size\n"; exit(0); } // backward pass space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); std::cerr << "Used space after allocating final derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); // std::cerr << "max_consume: " << max_consume << std::endl; for (int i = num_layers - 1; i >= 0; i--) { // allocate space for previous layer derivative std::cerr << "Processing layer " << i << std::endl; std::cerr << "Used space initial(MB): " << space_tracker.getConsumed() << std::endl; if (i > 0) { if (layer_type[i] == SOFTMAX) continue; else { space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); std::cerr << "Used space after allocating prev. derivative(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } // std::cerr << "max_consume: " << max_consume << std::endl; } int layer_to_prefetch = findPrefetchLayer(i); // if layer to be prefetched, allocate space for that layer if (layer_to_prefetch != -1) { std::cerr << "Prefetch layer " << layer_to_prefetch << std::endl; space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); std::cerr << "Used space after allocating prefetch(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); } if (layer_type[i] == CONV) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; size_t cur_filter_workspace_size; checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_FILTER, algo_pref, hard, cur_filter_workspace_size)); size_t cur_data_workspace_size = 0; if (i > 0) checkWORKSPACE(cur_params->getWorkspaceSize(space_tracker.free_bytes, ConvLayerParams::BWD_DATA, algo_pref, hard, cur_data_workspace_size)); size_t cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size :cur_data_workspace_size; space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); std::cerr << "Used space after allocating workspace(MB): " << space_tracker.getConsumed() << std::endl; space_tracker.updateMaxConsume(max_consume); if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } // std::cerr << "max_consume: " << max_consume << std::endl; if (!space_tracker.isAvailable()) return false; // current layer computation over, deallocate workspace space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); std::cerr << "Used space after deallocating workspace(MB): " << space_tracker.getConsumed() << std::endl; if (!pre_alloc_conv_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_fc_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateMaxConsume(max_consume); std::cerr << "Used space after allocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } if (!space_tracker.isAvailable()) return false; if (!pre_alloc_batch_norm_derivative) { space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); std::cerr << "Used space after deallocating weight derv.(MB): " << space_tracker.getConsumed() << std::endl; } } if (!space_tracker.isAvailable()) return false; // deallocate layer output and derivative space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); std::cerr << "Used space after deallocating output, derivative(MB): " << space_tracker.getConsumed() << std::endl; // if 1st layer, deallocate input layer also if (i == 0) { space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); std::cerr << "Used space after deallocating input(MB): " << 
space_tracker.getConsumed() << std::endl; } } if (space_tracker.getConsumed() > 0) std::cerr << "Panic!! more free bytes\n"; if (space_tracker.getConsumed() != 0) std::cerr << "Panic!! bytes not freed properly\n"; // return true; exp_max_consume = max_consume; // check with cnmem once bool ret_val = simulateCNMEMMemory(max_consume); return ret_val; } bool NeuralNet::simulateCNMEMMemory(size_t &max_consume) { size_t init_max_consume = max_consume; cnmemDevice_t cnmem_device; size_t t; checkCudaErrors(cudaMemGetInfo(&free_bytes, &t)); std::cout << "free_bytes: " << free_bytes << std::endl; free_bytes -= 100 * 1024 * 1024; cnmem_device.device = 0; cnmem_device.numStreams = 0; cnmem_device.streams = NULL; cnmem_device.streamSizes = NULL; std::string cnmem_memory_state_filename; if (vdnn_type == vDNN_ALL) { if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { cnmem_memory_state_filename = "cnmem_all_p.dat"; } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { cnmem_memory_state_filename = "cnmem_all_m.dat"; } } else if (vdnn_type == vDNN_CONV) { if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { cnmem_memory_state_filename = "cnmem_conv_p.dat"; } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { cnmem_memory_state_filename = "cnmem_conv_m.dat"; } } else if (vdnn_type == vDNN_DYN) { cnmem_memory_state_filename = "cnmem_dyn.dat"; } else { cnmem_memory_state_filename = "cnmem_unknown.dat"; } FILE *cnmem_memory_state_fptr = fopen(cnmem_memory_state_filename.c_str(), "w"); size_t run_count = 0; bool out_of_memory = false; while (true) { run_count++; if (max_consume >= free_bytes) break; out_of_memory = false; cnmem_device.size = max_consume; std::cerr << run_count << ' ' << max_consume << std::endl; if (max_consume > free_bytes) std::cerr << "panic!! max_consume > free_bytes\n"; checkCNMEM(cnmemInit(1, &cnmem_device, CNMEM_FLAGS_CANNOT_GROW)); resetPrefetched(); fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n"); fprintf(cnmem_memory_state_fptr, "run_count: %lu\n", run_count); fprintf(cnmem_memory_state_fptr, "max_consume: %lu\n", max_consume); fprintf(cnmem_memory_state_fptr, "//////////////////////////////////////////////////////////////////\n"); fprintf(cnmem_memory_state_fptr, "initial state\n"); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); checkCNMEMSim(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL), layer_input_size[0] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. layer_input[%d] - size: %lu\n", 0, layer_input_size[0] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); // forward propagate for (int i = 0; i < num_layers; i++) { size_t cur_workspace_size; void *cur_workspace; checkCNMEMSim(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. 
layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (layer_type[i] == CONV) { checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (to_offload[i]) { checkCNMEMSim(cnmemFree(layer_input[i], NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } } if (out_of_memory) { checkCNMEM(cnmemFinalize()); if (max_consume < free_bytes) continue; else break; } checkCNMEMSim(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL), layer_input_size[num_layers] * data_type_size, max_consume, free_bytes, checkCNMEM(cnmemFinalize()); continue, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", num_layers, layer_input_size[num_layers] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEMSim(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL), layer_input_size[layer_to_prefetch] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. prefetch layer_input[%d] - size: %lu\n", layer_to_prefetch, layer_input_size[layer_to_prefetch] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } checkCNMEMSim(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. dlayer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; // allocate space for derivative if (!pre_alloc_conv_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->kernel_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. 
db - size: %lu\n", cur_params->C_out * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? cur_filter_workspace_size : cur_data_workspace_size; checkCNMEMSim(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after alloc. conv. workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (!pre_alloc_fc_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dW - size: %lu\n", cur_params->weight_matrix_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. db - size: %lu\n", cur_params->C_out * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { if (!cur_params->cnmemAllocDerivativesCheck(data_type_size, NULL, max_consume, free_bytes, out_of_memory)) break; fprintf(cnmem_memory_state_fptr, "after alloc. dscale - size: %lu\n", cur_params->allocation_size * data_type_size); fprintf(cnmem_memory_state_fptr, "after alloc. dbias - size: %lu\n", cur_params->allocation_size * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; continue; } if (layer_type[i] == CONV) { checkCNMEMSim(cnmemFree(cur_workspace, NULL), cur_workspace_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free conv. 
workspace - size: %lu\n", cur_workspace_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } else if (layer_type[i] == BATCHNORM) { if (!pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); fprintf(cnmem_memory_state_fptr, "after free dP - size: %lu\n", (long unsigned)0); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } checkCNMEMSim(cnmemFree(layer_input[i + 1], NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); checkCNMEMSim(cnmemFree(dlayer_input[i + 1], NULL), layer_input_size[i + 1] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free dlayer_input[%d] - size: %lu\n", i + 1, layer_input_size[i + 1] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); if (i == 0) { checkCNMEMSim(cnmemFree(layer_input[i], NULL), layer_input_size[i] * data_type_size, max_consume, free_bytes, break, out_of_memory); fprintf(cnmem_memory_state_fptr, "after free layer_input[%d] - size: %lu\n", i, layer_input_size[i] * data_type_size); cnmemPrintMemoryStateTogether(cnmem_memory_state_fptr, NULL); } } checkCNMEM(cnmemFinalize()); if (out_of_memory) { if (max_consume < free_bytes) continue; else break; } break; } free_bytes += 100 * 1024 * 1024; if (max_consume < free_bytes) { double exp_size = (init_max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024); double act_size = (max_consume + init_free_bytes - free_bytes) / (1.0 * 1024 * 1024); fprintf(cnmem_memory_state_fptr, "expected_memory_consume: %f MB\n", exp_size); fprintf(cnmem_memory_state_fptr, "actual_memory_consume: %f MB\n", act_size); } else { fprintf(cnmem_memory_state_fptr, "out of memory\n"); } fclose(cnmem_memory_state_fptr); if (max_consume < free_bytes) return true; else return false; } void NeuralNet::vDNNOptimize(size_t &exp_max_consume, size_t &max_consume) { bool hard = true, soft = false; // if type is vDNN_ALL or vDNN_CONV, check if sufficient space is available if (vdnn_type == vDNN_ALL) { setOffload(OFFLOAD_ALL); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_CONV) { setOffload(OFFLOAD_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) 
outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_NONE) { setOffload(OFFLOAD_NONE); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } else if (vdnn_type == vDNN_ALTERNATE_CONV) { setOffload(OFFLOAD_ALTERNATE_CONV); resetPrefetched(); if (vdnn_conv_algo == vDNN_PERFORMANCE_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } else if (vdnn_conv_algo == vDNN_MEMORY_OPTIMAL) { if (!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); } return; } if (vdnn_type == vDNN_DYN) { // check for trainability std::cerr << "vDNN_DYN\n"; setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(!simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) outOfMemory(); // check if work with fastest algo and no offload, if so, select it and return setOffload(NeuralNet::OFFLOAD_NONE); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, NO OFFLOAD\n"; return; } // check if conv offload and fastest algo works, then check if all offload and fastest algo works setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing PERF_OPT, ALL OFFLOAD\n"; return; } // optimize using greedy algo memory usage while improving performance setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_PERFORMANCE_OPTIMAL, soft, exp_max_consume, max_consume)) { std::cerr << "Choosing GREEDY, ALL OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_CONV); resetPrefetched(); if (simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, CONV OFFLOAD\n"; return; } setOffload(NeuralNet::OFFLOAD_ALL); resetPrefetched(); if(simulateNeuralNetworkMemory(PREFER_MEMORY_OPTIMAL, hard, exp_max_consume, max_consume)) { std::cerr << "Choosing MEM_OPT, ALL OFFLOAD\n"; return; } } exit(0); } void NeuralNet::setOffload(NeuralNet::OffloadType offload_type) { if (offload_type == OFFLOAD_NONE) { for (int i = 0; i < num_layers; i++) to_offload[i] = false; } else if (offload_type == OFFLOAD_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if 
(offload_type == OFFLOAD_ALL) { for (int i = 0; i < num_layers; i++) { // Only SOFTMAX, CONV, POOL, FULLY_CONNECTED used so far if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX or layer_type[i] == FULLY_CONNECTED) to_offload[i] = false; else to_offload[i] = true; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } } else if (offload_type == OFFLOAD_ALTERNATE_CONV) { for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) to_offload[i] = true; else to_offload[i] = false; } // set last non SOFTMAX/ACTV layer to no_offload for (int i = num_layers - 1; i >= 0; i--) { if (layer_type[i] == SOFTMAX or layer_type[i] == ACTV) ; else { to_offload[i] = false; break; } } bool toggle = true; for (int i = 0; i < num_layers; i++) { if (layer_type[i] == CONV) { if (toggle == false) to_offload[i] = false; toggle = !toggle; } } } } void NeuralNet::resetPrefetched() { for (int i = 0; i < num_layers; i++) prefetched[i] = false; } void NeuralNet::getLoss(void *X, int *y, double learning_rate, bool train, int *correct_count, float *loss) { std::vector<float> t1, t2; this->getLoss(X, y, learning_rate, t1, t2, train, correct_count, loss); } void NeuralNet::getLoss(void *X, int *y, double learning_rate, std::vector<float> &fwd_vdnn_lag, std::vector<float> &bwd_vdnn_lag, bool train, int *correct_count, float *scalar_loss) { CnmemSpace space_tracker(free_bytes); // std::cout << "here\n"; // std::cout << "Free bytes: " << free_bytes << std::endl; for (int i = 0; i < num_layers; i++) prefetched[i] = false; checkCNMEM(cnmemMalloc(&layer_input[0], layer_input_size[0] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[0] * data_type_size); checkCudaErrors(cudaMemcpy(layer_input[0], X, batch_size * input_channels * input_h * input_w * data_type_size, cudaMemcpyHostToDevice)); if (train == true) { checkCudaErrors(cudaMemcpy(this->y, y, batch_size * data_type_size, cudaMemcpyHostToDevice)); } float alpha = 1.0, beta = 0.0; float Salpha = 1.0, Sbeta = 0.0; double Dalpha = 1.0, Dbeta = 0.0; // forward propagate for (int i = 0; i < num_layers; i++) { if (train == false && i == num_layers - 1) break; // ---------------------- vDNN start ---------------------- size_t cur_workspace_size; void *cur_workspace; // offload if required if (i > 0 && to_offload[i] && train == true) checkCudaErrors(cudaMemcpyAsync(h_layer_input[i], layer_input[i], layer_input_size[i] * data_type_size, cudaMemcpyDeviceToHost, stream_memory)); checkCNMEM(cnmemMalloc(&layer_input[i + 1], layer_input_size[i + 1] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i + 1] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ // std::cout << "here" << i << std::endl; if (layer_type[i] == CONV) { // std::cout << "conv\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_workspace_size = cur_params->fwd_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); // computation checkCUDNN(cudnnConvolutionForward(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->filter_desc, cur_params->W, cur_params->conv_desc, cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta, cur_params->output_tensor, layer_input[i + 1])); checkCUDNN(cudnnAddTensor(cudnn_handle, &alpha, cur_params->bias_desc, 
cur_params->b, &alpha, cur_params->output_tensor, layer_input[i + 1])); // if activation required if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; } else if (layer_type[i] == FULLY_CONNECTED) { // std::cout << "FC\n"; FCLayerParams *cur_params = (FCLayerParams *)params[i]; // std::cout << "FChere" << i << std::endl; if (data_type == CUDNN_DATA_FLOAT) { checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Salpha, (float *)cur_params->b, cur_params->C_out, (float *)one_vec, 1, &Salpha, (float *)layer_input[i + 1], cur_params->C_out)); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, cur_params->C_in, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)layer_input[i + 1], cur_params->C_out)); checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, batch_size, 1, &Dalpha, (double *)cur_params->b, cur_params->C_out, (double *)one_vec, 1, &Dalpha, (double *)layer_input[i + 1], cur_params->C_out)); } if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, layer_input[i + 1])); } // std::cout << "FChere" << i << std::endl; } else if (layer_type[i] == DROPOUT) { // std::cout << "Dropout\n"; DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutForward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { // std::cout << "Batchnorm\n"; BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (train == true) { checkCUDNN(cudnnBatchNormalizationForwardTraining(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->factor, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); } else { checkCUDNN(cudnnBatchNormalizationForwardInference(cudnn_handle, cur_params->mode, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, layer_input[i + 1], cur_params->sbmv_desc, cur_params->scale, cur_params->bias, cur_params->running_mean, cur_params->running_variance, cur_params->epsilon)); } } else if (layer_type[i] == POOLING) { // std::cout << "Pooling\n"; PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingForward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->output_tensor, 
layer_input[i + 1])); } else if (layer_type[i] == ACTV) { // std::cout << "Actv\n"; std::cout << "Panic!! ACTV wrong place\n"; exit(0); ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationForward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } else if (layer_type[i] == SOFTMAX) { // std::cout << "Softmax\n"; std::cout << "Panic!! SOFTMAX wrong place\n"; exit(0); if (train == true) { SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } } // ---------------------- vDNN start ---------------------- // synchronization // checkCudaErrors(cudaDeviceSynchronize()); // if next layer is ACTV or SOFTMAX, complete that and come to synchronization // the case in above if for ACTV and SOFTMAX never occurs if (layer_type[i + 1] == SOFTMAX) { i++; if (train == true) { layer_input[i + 1] = layer_input[i]; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxForward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, layer_input[i + 1])); } i--; } struct timespec start_time, end_time; checkCudaErrors(cudaStreamSynchronize(stream_compute)); if (train) clock_gettime(CLOCK_MONOTONIC, &start_time); checkCudaErrors(cudaStreamSynchronize(stream_memory)); if (train) { clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; fwd_vdnn_lag.push_back(lag); } // std::cout << "EndSynchere" << i << std::endl; if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); } if (to_offload[i] && train == true) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (train == false) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } if (layer_type[i + 1] == ACTV or layer_type[i + 1] == SOFTMAX) { i = i + 1; } // std::cout << "EndSynchere" << i << std::endl; // ---------------------- vDNN end ------------------------ } // std::cout << "here" << std::endl; if (train == false) { compareOutputCorrect(correct_count, y); checkCNMEM(cnmemFree(layer_input[num_layers - 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[num_layers - 1] * data_type_size); return; } *scalar_loss = computeLoss(); // ---------------------- vDNN start ---------------------- checkCNMEM(cnmemMalloc(&dlayer_input[num_layers], batch_size * num_classes * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[num_layers] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // ---------------------- vDNN end ------------------------ if (layer_type[num_layers - 1] == SOFTMAX) { // SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[num_layers - 1]; if (data_type == CUDNN_DATA_FLOAT) { checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(float))); softmaxLossBackProp<float><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (float *)layer_input[num_layers], (float 
*)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } else if (data_type == CUDNN_DATA_DOUBLE) { checkCudaErrors(cudaMemset(dlayer_input[num_layers], 0, batch_size * num_classes * sizeof(double))); softmaxLossBackProp<double><<<ceil(1.0 * batch_size / BW), BW>>>(this->y, (double *)layer_input[num_layers], (double *)dlayer_input[num_layers], batch_size, num_classes, softmax_eps); } } for (int i = num_layers - 1; i >= 0; i--) { // ---------------------- vDNN start ---------------------- size_t cur_filter_workspace_size, cur_data_workspace_size, cur_workspace_size; void *cur_workspace; if (i > 0) { if (layer_type[i] == ACTV or layer_type[i] == SOFTMAX) { dlayer_input[i] = dlayer_input[i + 1]; } else { int layer_to_prefetch = findPrefetchLayer(i); if (layer_to_prefetch != -1) { checkCNMEM(cnmemMalloc(&layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[layer_to_prefetch] * data_type_size); // std::cout << "Free bytes: " << free_bytes << std::endl; if (layer_to_prefetch != 0) { checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], h_layer_input[layer_to_prefetch], layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory)); } else { // std::cout << "transfer here\n"; checkCudaErrors(cudaMemcpyAsync(layer_input[layer_to_prefetch], X, layer_input_size[layer_to_prefetch] * data_type_size, cudaMemcpyHostToDevice, stream_memory)); // std::cout << "transfer here\n"; } } checkCNMEM(cnmemMalloc(&dlayer_input[i], layer_input_size[i] * data_type_size, NULL)); space_tracker.updateSpace(CnmemSpace::SUB, layer_input_size[i] * data_type_size); } // std::cout << "Free bytes: " << free_bytes << std::endl; } // ---------------------- vDNN end ------------------------ if (layer_type[i] == CONV) { // std::cout << "here\n"; ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } // allocate space for derivative if (!pre_alloc_conv_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } cur_filter_workspace_size = cur_params->bwd_filter_workspace_size; if (i > 0) cur_data_workspace_size = cur_params->bwd_data_workspace_size; else cur_data_workspace_size = 0; // std::cout << "bwd cur_workspace_size: " << cur_workspace_size << std::endl; cur_workspace_size = (cur_filter_workspace_size > cur_data_workspace_size) ? 
cur_filter_workspace_size : cur_data_workspace_size; checkCNMEM(cnmemMalloc(&cur_workspace, cur_workspace_size, NULL)); checkCUDNN(cudnnConvolutionBackwardBias(cudnn_handle, &alpha, cur_params->output_tensor, dlayer_input[i + 1], &beta, cur_params->bias_desc, cur_params->db)); // std::cout << "neural_net: backward conv i:" << i << std::endl; checkCUDNN(cudnnConvolutionBackwardFilter(cudnn_handle, &alpha, cur_params->input_tensor, layer_input[i], cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_filter_algo, cur_workspace, cur_workspace_size, &beta, cur_params->filter_desc, cur_params->dW)); if (i > 0) checkCUDNN(cudnnConvolutionBackwardData(cudnn_handle, &alpha, cur_params->filter_desc, cur_params->W, cur_params->output_tensor, dlayer_input[i + 1], cur_params->conv_desc, cur_params->bwd_data_algo, cur_workspace, cur_workspace_size, &beta, cur_params->input_tensor, dlayer_input[i])); space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size); // std::cout << "Free bytes: " << free_bytes << std::endl; // std::cout << "here\n"; cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == FULLY_CONNECTED) { FCLayerParams *cur_params = (FCLayerParams *)params[i]; if (cur_params->activation_mode != ACTIVATION_NONE) { checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->output_tensor, layer_input[i + 1], &beta, cur_params->output_tensor, dlayer_input[i + 1])); } if (!pre_alloc_fc_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->C_out * data_type_size); } if (data_type == CUDNN_DATA_FLOAT) { // bias backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)one_vec, batch_size, &Sbeta, (float *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Salpha, (float *)dlayer_input[i + 1], cur_params->C_out, (float *)layer_input[i], cur_params->C_in, &Sbeta, (float *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasSgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Salpha, (float *)cur_params->W, cur_params->C_out, (float *)dlayer_input[i + 1], cur_params->C_out, &Sbeta, (float *)dlayer_input[i], cur_params->C_in)); } else if (data_type == CUDNN_DATA_DOUBLE) { // bias backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out, 1, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)one_vec, batch_size, &Dbeta, (double *)cur_params->db, cur_params->C_out)); // weight backward checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, cur_params->C_out, cur_params->C_in, batch_size, &Dalpha, (double *)dlayer_input[i + 1], cur_params->C_out, (double *)layer_input[i], cur_params->C_in, &Dbeta, (double *)cur_params->dW, cur_params->C_out)); // data backward if (i > 0) checkCUBLAS(cublasDgemm(cublas_handle, CUBLAS_OP_T, CUBLAS_OP_N, cur_params->C_in, batch_size, cur_params->C_out, &Dalpha, (double *)cur_params->W, cur_params->C_out, (double *)dlayer_input[i + 1], 
cur_params->C_out, &Dbeta, (double *)dlayer_input[i], cur_params->C_in)); } cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == DROPOUT) { DropoutLayerParams *cur_params = (DropoutLayerParams *)params[i]; checkCUDNN(cudnnDropoutBackward(cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->reserved_space, cur_params->reserved_space_size)); } else if (layer_type[i] == BATCHNORM) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; if (!pre_alloc_batch_norm_derivative) { cur_params->cnmemAllocDerivatives(data_type_size, NULL); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::SUB, cur_params->allocation_size * data_type_size); } checkCUDNN(cudnnBatchNormalizationBackward(cudnn_handle, cur_params->mode, &alpha, &beta, &alpha, &beta, cur_params->input_tensor, layer_input[i], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, dlayer_input[i], cur_params->sbmv_desc, cur_params->scale, cur_params->dscale, cur_params->dbias, cur_params->epsilon, cur_params->result_save_mean, cur_params->result_save_inv_var)); cur_params->stepParams(cublas_handle, learning_rate); } else if (layer_type[i] == POOLING) { PoolingLayerParams *cur_params = (PoolingLayerParams *)params[i]; checkCUDNN(cudnnPoolingBackward(cudnn_handle, cur_params->pool_desc, &alpha, cur_params->output_tensor, layer_input[i + 1], cur_params->output_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); } else if (layer_type[i] == ACTV) { ActivationLayerParams *cur_params = (ActivationLayerParams *)params[i]; checkCUDNN(cudnnActivationBackward(cudnn_handle, cur_params->actv_desc, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], cur_params->input_tensor, layer_input[i], &beta, cur_params->input_tensor, dlayer_input[i])); continue; } else if (layer_type[i] == SOFTMAX) { // std::cout << "compute here\n"; SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)params[i]; checkCUDNN(cudnnSoftmaxBackward(cudnn_handle, cur_params->algo, cur_params->mode, &alpha, cur_params->input_tensor, layer_input[i + 1], cur_params->input_tensor, dlayer_input[i + 1], &beta, cur_params->input_tensor, dlayer_input[i])); // std::cout << "compute here\n"; continue; } // ---------------------- vDNN start ---------------------- // checkCudaErrors(cudaDeviceSynchronize()); struct timespec start_time, end_time; checkCudaErrors(cudaStreamSynchronize(stream_compute)); if (train) clock_gettime(CLOCK_MONOTONIC, &start_time); checkCudaErrors(cudaStreamSynchronize(stream_memory)); if (train) { clock_gettime(CLOCK_MONOTONIC, &end_time); float lag = (end_time.tv_sec - start_time.tv_sec) * 1e3 + (end_time.tv_nsec - start_time.tv_nsec) * 1e-6; bwd_vdnn_lag.insert(bwd_vdnn_lag.begin(), lag); } if (layer_type[i] == CONV) { checkCNMEM(cnmemFree(cur_workspace, NULL)); space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size); if (!pre_alloc_conv_derivative) { ConvLayerParams *cur_params = (ConvLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->kernel_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == FULLY_CONNECTED) { if (!pre_alloc_fc_derivative) { FCLayerParams *cur_params = 
(FCLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->weight_matrix_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->C_out * data_type_size); } } else if (layer_type[i] == BATCHNORM) { if (train == true and !pre_alloc_batch_norm_derivative) { BatchNormLayerParams *cur_params = (BatchNormLayerParams *)params[i]; cur_params->cnmemFreeDerivatives(NULL); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); space_tracker.updateSpace(CnmemSpace::ADD, cur_params->allocation_size * data_type_size); } } checkCNMEM(cnmemFree(layer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); checkCNMEM(cnmemFree(dlayer_input[i + 1], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i + 1] * data_type_size); if (i == 0) { checkCNMEM(cnmemFree(layer_input[i], NULL)); space_tracker.updateSpace(CnmemSpace::ADD, layer_input_size[i] * data_type_size); } // ---------------------- vDNN end ------------------------ } if (space_tracker.getConsumed() != 0) { std::cout << "Panic!! Space not updated properly\n"; } // exit(0); } int NeuralNet::findPrefetchLayer(int cur_layer) { for (int i = cur_layer - 1; i >= 0; i--) { if (to_offload[i] && !prefetched[i]) { prefetched[i] = true; return i; } else if (layer_type[i] == CONV) { return -1; } } return -1; }
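Every cnmemMalloc/cnmemFree in the simulation and training code above is mirrored by a CnmemSpace::SUB or CnmemSpace::ADD bookkeeping call plus an updateMaxConsume. The real CnmemSpace class is defined elsewhere in this project, so the stand-in below is only a minimal sketch of that interface, reconstructed from the call sites above, to make the accounting pattern concrete:

#include <cstddef>
#include <iostream>

// Illustrative stand-in only; the project's actual CnmemSpace may differ.
struct SpaceTrackerSketch {
    enum Op { SUB, ADD };
    long long free_bytes;     // bytes still available in the simulated budget
    long long initial_bytes;  // starting budget

    explicit SpaceTrackerSketch(size_t budget)
        : free_bytes((long long)budget), initial_bytes((long long)budget) {}

    void updateSpace(Op op, size_t size) {
        if (op == SUB) free_bytes -= (long long)size;  // simulated allocation
        else           free_bytes += (long long)size;  // simulated free
    }
    // consumed = bytes currently allocated and not yet freed
    long long getConsumed() const { return initial_bytes - free_bytes; }
    bool isAvailable() const { return free_bytes >= 0; }
    void updateMaxConsume(size_t &max_consume) const {
        long long consumed = getConsumed();
        if (consumed > 0 && (size_t)consumed > max_consume)
            max_consume = (size_t)consumed;
    }
};

int main() {
    size_t max_consume = 0;
    SpaceTrackerSketch tracker(1024);                    // hypothetical 1 KiB budget
    tracker.updateSpace(SpaceTrackerSketch::SUB, 256);   // "allocate" a layer output
    tracker.updateMaxConsume(max_consume);
    tracker.updateSpace(SpaceTrackerSketch::ADD, 256);   // "free" it again
    std::cout << max_consume << " " << tracker.getConsumed() << std::endl;  // prints: 256 0
    return 0;
}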
2df631290245a86ee91cd8eac037334daa17f7a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include "Point.h" #include <stdio.h> const int MAX_THREADS_PER_BLOCK = 1024; int numOfBlocks; int dev_numOfPoints; int chunkSize; Point* dev_allPoints; float* dev_distancesArr; __global__ void setPointsForTimeIncrement(Point* dev_allPoints, float cosCalc, float sinCalc, int numOfPoints) { //int currentThread = threadIdx.x; //int currentBlock = blockIdx.x; ////setting the position of the thread. //int pos = currentBlock*MAX_THREADS_PER_BLOCK + currentThread; //if(pos < numOfPoints) //{ // dev_allPoints[pos].x = (float)dev_allPoints[pos].a + (dev_allPoints[pos].radius*cosCalc); // dev_allPoints[pos].y = (float)dev_allPoints[pos].b + (dev_allPoints[pos].radius*sinCalc); //} } void setPointsDevice(Point* allPoints, float timeIncrement, float timeInterval) { hipError_t cudaStatus; //set the cos/sin calc for the threads ONE TIME ONLY. float cosCalc = (float)cos((2*acos(-1.0) *timeIncrement)/timeInterval); float sinCalc = (float)sin((2*acos(-1.0) *timeIncrement)/timeInterval); hipLaunchKernelGGL(( setPointsForTimeIncrement), dim3(numOfBlocks), dim3(MAX_THREADS_PER_BLOCK), 0, 0, dev_allPoints, cosCalc, sinCalc, dev_numOfPoints); cudaStatus = hipGetLastError(); if(cudaStatus != hipSuccess) { fprintf(stderr, "setPointsForTimeIncrement launch failed: %s\n", hipGetErrorString(cudaStatus)); releaseDeviceMemory(); return; } cudaStatus = hipDeviceSynchronize(); if(cudaStatus != hipSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } cudaStatus = hipMemcpy(allPoints, dev_allPoints, dev_numOfPoints*sizeof(Point), hipMemcpyDeviceToHost); if(cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy from device to host failed!"); releaseDeviceMemory(); return; } cudaStatus = hipDeviceSynchronize(); if(cudaStatus != hipSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } } void copyPointsToDevice(Point* allPoints, int numOfPoints, int chunk) { hipError_t cudaStatus; dev_numOfPoints = numOfPoints; chunkSize = chunk; cudaStatus = hipMalloc((void**)&dev_allPoints, dev_numOfPoints*sizeof(Point)); if(cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc for dev_allPoints failed"); releaseDeviceMemory(); return; } cudaStatus = hipDeviceSynchronize(); if(cudaStatus != hipSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } cudaStatus = hipMemcpy(dev_allPoints, allPoints, dev_numOfPoints*sizeof(Point), hipMemcpyHostToDevice); if(cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy from host to device failed!"); releaseDeviceMemory(); return; } numOfBlocks = dev_numOfPoints/MAX_THREADS_PER_BLOCK; if(dev_numOfPoints%MAX_THREADS_PER_BLOCK != 0) numOfBlocks += 1; cudaStatus = hipDeviceSynchronize(); if(cudaStatus != hipSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } } void initDevice() { hipError_t cudaStatus; // hipDeviceProp_t deviceProp; cudaStatus = hipSetDevice(0); if(cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed!"); releaseDeviceMemory(); return; } cudaStatus = hipDeviceSynchronize(); if(cudaStatus != hipSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } // hipGetDeviceProperties(&deviceProp, 0); // maxNumberOfThreadPerBlock = 
deviceProp.maxThreadsPerBlock; } void releaseDeviceMemory() { hipFree(dev_allPoints); }
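The kernel body in this file (and in its CUDA counterpart below) is entirely commented out. For reference, here is a minimal sketch of what those comments describe: a bounds-checked, one-thread-per-point circular-motion update. The PointSketch field layout (a, b, radius, x, y) is an assumption taken from the commented code, not the project's actual Point definition:

// Sketch of the logic the commented-out kernel body describes.
struct PointSketch { int a, b; float radius, x, y; };  // assumed field layout

__global__ void setPointsForTimeIncrementSketch(PointSketch *pts, float cosCalc,
                                                float sinCalc, int numOfPoints)
{
    // blockDim.x equals MAX_THREADS_PER_BLOCK in the launches above, so this is
    // the same global index the commented-out code builds by hand.
    int pos = blockIdx.x * blockDim.x + threadIdx.x;
    if (pos < numOfPoints) {
        pts[pos].x = (float)pts[pos].a + pts[pos].radius * cosCalc;
        pts[pos].y = (float)pts[pos].b + pts[pos].radius * sinCalc;
    }
}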
2df631290245a86ee91cd8eac037334daa17f7a1.cu
#include "kernel.h" #include "Point.h" #include <stdio.h> const int MAX_THREADS_PER_BLOCK = 1024; int numOfBlocks; int dev_numOfPoints; int chunkSize; Point* dev_allPoints; float* dev_distancesArr; __global__ void setPointsForTimeIncrement(Point* dev_allPoints, float cosCalc, float sinCalc, int numOfPoints) { //int currentThread = threadIdx.x; //int currentBlock = blockIdx.x; ////setting the position of the thread. //int pos = currentBlock*MAX_THREADS_PER_BLOCK + currentThread; //if(pos < numOfPoints) //{ // dev_allPoints[pos].x = (float)dev_allPoints[pos].a + (dev_allPoints[pos].radius*cosCalc); // dev_allPoints[pos].y = (float)dev_allPoints[pos].b + (dev_allPoints[pos].radius*sinCalc); //} } void setPointsDevice(Point* allPoints, float timeIncrement, float timeInterval) { cudaError_t cudaStatus; //set the cos/sin calc for the threads ONE TIME ONLY. float cosCalc = (float)cos((2*acos(-1.0) *timeIncrement)/timeInterval); float sinCalc = (float)sin((2*acos(-1.0) *timeIncrement)/timeInterval); setPointsForTimeIncrement<<<numOfBlocks, MAX_THREADS_PER_BLOCK>>>(dev_allPoints, cosCalc, sinCalc, dev_numOfPoints); cudaStatus = cudaGetLastError(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "setPointsForTimeIncrement launch failed: %s\n", cudaGetErrorString(cudaStatus)); releaseDeviceMemory(); return; } cudaStatus = cudaDeviceSynchronize(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } cudaStatus = cudaMemcpy(allPoints, dev_allPoints, dev_numOfPoints*sizeof(Point), cudaMemcpyDeviceToHost); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy from device to host failed!"); releaseDeviceMemory(); return; } cudaStatus = cudaDeviceSynchronize(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } } void copyPointsToDevice(Point* allPoints, int numOfPoints, int chunk) { cudaError_t cudaStatus; dev_numOfPoints = numOfPoints; chunkSize = chunk; cudaStatus = cudaMalloc((void**)&dev_allPoints, dev_numOfPoints*sizeof(Point)); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc for dev_allPoints failed"); releaseDeviceMemory(); return; } cudaStatus = cudaDeviceSynchronize(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } cudaStatus = cudaMemcpy(dev_allPoints, allPoints, dev_numOfPoints*sizeof(Point), cudaMemcpyHostToDevice); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy from host to device failed!"); releaseDeviceMemory(); return; } numOfBlocks = dev_numOfPoints/MAX_THREADS_PER_BLOCK; if(dev_numOfPoints%MAX_THREADS_PER_BLOCK != 0) numOfBlocks += 1; cudaStatus = cudaDeviceSynchronize(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } } void initDevice() { cudaError_t cudaStatus; // cudaDeviceProp deviceProp; cudaStatus = cudaSetDevice(0); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed!"); releaseDeviceMemory(); return; } cudaStatus = cudaDeviceSynchronize(); if(cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDevice Synchronize returned error code %d\n", cudaStatus); releaseDeviceMemory(); return; } // cudaGetDeviceProperties(&deviceProp, 0); // maxNumberOfThreadPerBlock = deviceProp.maxThreadsPerBlock; } void releaseDeviceMemory() { cudaFree(dev_allPoints); }
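For context, a hypothetical host-side driver sketching the call order these helpers imply: copy the points once, update them repeatedly, free device memory at the end. The point count, step count and time interval are placeholders, and kernel.h is assumed to declare the four functions defined above:

#include <cstdlib>
#include "Point.h"   // assumed to define the Point struct used by the file above
#include "kernel.h"  // assumed to declare the four helpers defined above

int main() {
    const int numOfPoints = 1024;                  // placeholder size
    Point *allPoints = (Point *)calloc(numOfPoints, sizeof(Point));
    // Real code would fill a, b and radius for every point here.

    initDevice();                                  // cudaSetDevice(0) + sync
    copyPointsToDevice(allPoints, numOfPoints, 0); // one-time host-to-device copy
    for (int step = 0; step < 100; step++) {       // placeholder time loop
        // Launches the kernel and copies the updated points back into allPoints.
        setPointsDevice(allPoints, (float)step, 10.0f);
    }
    releaseDeviceMemory();                         // frees dev_allPoints
    free(allPoints);
    return 0;
}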
3dca8a4348b2b5e51a770bce699353f31f62de8f.hip
// !!! This is a file automatically generated by hipify!!! /** * * Date 11 june 2009 * ==== * * Authors Vincent Garcia * ======= Eric Debreuve * Michel Barlaud * * Description Given a reference point set and a query point set, the program returns * =========== the distance between each query point and its k-th nearest neighbor in * the reference point set. Only the distance is provided. The computation * is performed using the API NVIDIA CUDA. * * Paper Fast k nearest neighbor search using GPU * ===== * * BibTeX @INPROCEEDINGS{2008_garcia_cvgpu, * ====== author = {V. Garcia and E. Debreuve and M. Barlaud}, * title = {Fast k nearest neighbor search using GPU}, * booktitle = {CVPR Workshop on Computer Vision on GPU}, * year = {2008}, * address = {Anchorage, Alaska, USA}, * month = {June} * } * */ // If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0. #define MATLAB_CODE 0 // Includes #include <stdio.h> #include <cmath> #include <algorithm> #include <hip/hip_runtime.h> #include <rocblas.h> #if MATLAB_CODE == 1 #include "mex.h" #else #include <time.h> #endif // Constants used by the program #define MAX_PITCH_VALUE_IN_BYTES 262144 #define MAX_TEXTURE_WIDTH_IN_BYTES 65536 #define MAX_TEXTURE_HEIGHT_IN_BYTES 32768 #define MAX_PART_OF_FREE_MEMORY_USED 0.9 #define BLOCK_DIM 16 //-----------------------------------------------------------------------------------------------// // KERNELS // //-----------------------------------------------------------------------------------------------// /** * Given a matrix of size width*height, compute the square norm of each column. * * @param mat : the matrix * @param width : the number of columns for a colum major storage matrix * @param height : the number of rowm for a colum major storage matrix * @param norm : the vector containing the norm of the matrix */ __global__ void cuComputeNorm(float *mat, int width, int pitch, int height, float *norm){ unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ float val, sum = 0; int i; for (i = 0; i<height; i++){ val = mat[i*pitch + xIndex]; sum += val*val; } norm[xIndex] = sum; } } /** * Given the distance matrix of size width*height, adds the column vector * of size 1*height to each column of the matrix. * * @param dist : the matrix * @param width : the number of columns for a colum major storage matrix * @param pitch : the pitch in number of column * @param height : the number of rowm for a colum major storage matrix * @param vec : the vector to be added */ __global__ void cuAddRNorm(float *dist, int width, int pitch, int height, float *vec){ unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int xIndex = blockIdx.x * blockDim.x + tx; unsigned int yIndex = blockIdx.y * blockDim.y + ty; __shared__ float shared_vec[16]; if (tx == 0 && yIndex<height) shared_vec[ty] = vec[yIndex]; __syncthreads(); if (xIndex<width && yIndex<height) dist[yIndex*pitch + xIndex] += shared_vec[ty]; } /** * Given two row vectors with width column, adds the two vectors and compute * the square root of the sum. The result is stored in the first vector. 
* * @param vec1 : the first vector * @param vec2 : the second vector * @param width : the number of columns for a colum major storage matrix */ __global__ void cuAddQNormAndSqrt(float *vec1, float *vec2, int width){ unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ vec1[xIndex] = sqrt(vec1[xIndex] + vec2[xIndex]); } } /** * Gathers k-th smallest distances for each column of the distance matrix in the top. * * @param dist distance matrix * @param width width of the distance matrix * @param pitch pitch of the distance matrix given in number of columns * @param height height of the distance matrix * @param k number of smallest distance to consider */ __global__ void cuInsertionSort(float *dist, int width, int pitch, int height, int k){ // Variables int l, i, j; float *p; float v, max_value; unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ // Pointer shift and max value p = dist + xIndex; max_value = *p; // Part 1 : sort kth firt element for (l = pitch; l<k*pitch; l += pitch){ v = *(p + l); if (v<max_value){ i = 0; while (i<l && *(p + i) <= v) i += pitch; for (j = l; j>i; j -= pitch) *(p + j) = *(p + j - pitch); *(p + i) = v; } max_value = *(p + l); } // Part 2 : insert element in the k-th first lines for (l = k*pitch; l<height*pitch; l += pitch){ v = *(p + l); if (v<max_value){ i = 0; while (i<k*pitch && *(p + i) <= v) i += pitch; for (j = (k - 1)*pitch; j>i; j -= pitch) *(p + j) = *(p + j - pitch); *(p + i) = v; max_value = *(p + (k - 1)*pitch); } } } } //-----------------------------------------------------------------------------------------------// // K-th NEAREST NEIGHBORS // //-----------------------------------------------------------------------------------------------// /** * Prints the error message return during the memory allocation. 
* * @param error error value return by the memory allocation function * @param memorySize size of memory tried to be allocated */ void printErrorMessage(hipError_t error, int memorySize){ printf("==================================================\n"); printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error)); printf("Whished allocated memory : %d\n", memorySize); printf("==================================================\n"); #if MATLAB_CODE == 1 mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION"); #endif } /** * K nearest neighbor algorithm * - Initialize CUDA * - Allocate device memory * - Copy point sets (reference and query points) from host to device memory * - Compute the distance to the k-th nearest neighbor for each query point * - Copy distances from device to host memory * * @param ref_host reference points ; pointer to linear matrix * @param ref_width number of reference points ; width of the matrix * @param query_host query points ; pointer to linear matrix * @param query_width number of query points ; width of the matrix * @param height dimension of points ; height of the matrices * @param k number of neighbor to consider * @param dist_host distances to k-th nearest neighbor ; pointer to linear matrix * */ void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host){ unsigned int size_of_float = sizeof(float); // Variables float *dist_dev; float *query_dev; float *ref_dev; float *query_norm; float *ref_norm; size_t query_pitch; size_t query_pitch_in_bytes; size_t ref_pitch; size_t ref_pitch_in_bytes; size_t max_nb_query_traited; size_t actual_nb_query_width; unsigned int memory_total; unsigned int memory_free; hipError_t result; // CUDA Initialisation hipInit(0); hipblasInit(); // Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used hipCtx_t cuContext; hipDevice_t cuDevice = 0; hipCtxCreate(&cuContext, 0, cuDevice); cuMemGetInfo(&memory_free, &memory_total); hipCtxDetach(cuContext); // Determine maximum number of query that can be treated max_nb_query_traited = (memory_free * MAX_PART_OF_FREE_MEMORY_USED - size_of_float * ref_width * (height + 1)) / (size_of_float * (height + ref_width + 1)); max_nb_query_traited = min(query_width, (max_nb_query_traited / 16) * 16); // Allocation of global memory for query points, ||query||, and for 2.R^T.Q result = hipMallocPitch((void **)&query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, (height + ref_width + 1)); if (result){ printErrorMessage(result, max_nb_query_traited * size_of_float * (height + ref_width + 1)); return; } query_pitch = query_pitch_in_bytes / size_of_float; query_norm = query_dev + height * query_pitch; dist_dev = query_norm + query_pitch; // Allocation of global memory for reference points and ||query|| result = hipMallocPitch((void **)&ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height + 1); if (result){ printErrorMessage(result, ref_width * size_of_float * (height + 1)); hipFree(query_dev); return; } ref_pitch = ref_pitch_in_bytes / size_of_float; ref_norm = ref_dev + height * ref_pitch; // Memory copy of ref_host in ref_dev result = hipMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, hipMemcpyHostToDevice); // Computation of reference square norm dim3 G_ref_norm(ref_width / 256, 1, 1); dim3 T_ref_norm(256, 1, 1); if (ref_width % 256 != 0) G_ref_norm.x += 1; cuComputeNorm << <G_ref_norm, T_ref_norm >> >(ref_dev, 
ref_width, ref_pitch, height, ref_norm); // Main loop: split queries to fit in GPU memory for (int i = 0; i<query_width; i += max_nb_query_traited){ // Nomber of query points actually used actual_nb_query_width = min(max_nb_query_traited, query_width - i); // Memory copy of ref_host in ref_dev hipMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, hipMemcpyHostToDevice); // Computation of Q square norm dim3 G_query_norm(actual_nb_query_width / 256, 1, 1); dim3 T_query_norm(256, 1, 1); if (actual_nb_query_width % 256 != 0) G_query_norm.x += 1; cuComputeNorm << <G_query_norm, T_query_norm >> >(query_dev, actual_nb_query_width, query_pitch, height, query_norm); // Computation of Q*transpose(R) hipblasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, height, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch); // Add R norm to distances dim3 grid(actual_nb_query_width / 16, ref_width / 16, 1); dim3 thread(16, 16, 1); if (actual_nb_query_width % 16 != 0) grid.x += 1; if (ref_width % 16 != 0) grid.y += 1; cuAddRNorm << <grid, thread >> >(dist_dev, actual_nb_query_width, query_pitch, ref_width, ref_norm); // Sort each column cuInsertionSort << <G_query_norm, T_query_norm >> >(dist_dev, actual_nb_query_width, query_pitch, ref_width, k); // Add Q norm and compute Sqrt ONLY ON ROW K-1 cuAddQNormAndSqrt << <G_query_norm, T_query_norm >> >(dist_dev + (k - 1)*query_pitch, query_norm, actual_nb_query_width); // Memory copy hipMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev + (k - 1)*query_pitch, query_pitch_in_bytes, actual_nb_query_width*size_of_float, 1, hipMemcpyDeviceToHost); } // Free memory hipFree(ref_dev); hipFree(query_dev); // CUBLAS shutdown hipblasShutdown(); } //-----------------------------------------------------------------------------------------------// // MATLAB INTERFACES & C EXAMPLE // //-----------------------------------------------------------------------------------------------// #if MATLAB_CODE == 1 /** * Interface to use CUDA code in Matlab (gateway routine). * * @param nlhs Number of expected mxArrays (Left Hand Side) * @param plhs Array of pointers to expected outputs * @param nrhs Number of inputs (Right Hand Side) * @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction . 
*/ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ // Variables float* ref; int ref_width; int ref_height; float* query; int query_width; int query_height; float* dist; int k; // Reference points ref = (float *)mxGetData(prhs[0]); ref_width = mxGetM(prhs[0]); ref_height = mxGetN(prhs[0]); // Query points query = (float *)mxGetData(prhs[1]); query_width = mxGetM(prhs[1]); query_height = mxGetN(prhs[1]); // Number of neighbors to consider k = (int)mxGetScalar(prhs[2]); // Verification of the reference point and query point sizes if (ref_height != query_height) mexErrMsgTxt("Data must have the same dimension"); if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES) mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)"); if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES) mexErrMsgTxt("Query number is too large for CUDA (Max=65536)"); // Allocation of dist array dist = (float *)mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, 1, mxSINGLE_CLASS, mxREAL)); // Call KNN CUDA knn(ref, ref_width, query, query_width, ref_height, k, dist); } #else // C code /** * Example of use of kNN search CUDA. */ int main(void){ // Variables and parameters float* ref; // Pointer to reference point array float* query; // Pointer to query point array float* dist; // Pointer to distance array int ref_nb = 4096; // Reference point number, max=65535 int query_nb = 4096; // Query point number, max=65535 int dim = 32; // Dimension of points, max=8192 int k = 20; // Nearest neighbors to consider int iterations = 100; int i; // Memory allocation ref = (float *)malloc(ref_nb * dim * sizeof(float)); query = (float *)malloc(query_nb * dim * sizeof(float)); dist = (float *)malloc(query_nb * sizeof(float)); // Init srand(time(NULL)); for (i = 0; i<ref_nb * dim; i++) ref[i] = (float)rand() / (float)RAND_MAX; for (i = 0; i<query_nb * dim; i++) query[i] = (float)rand() / (float)RAND_MAX; // Variables for duration evaluation hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float elapsed_time; // Display informations printf("Number of reference points : %6d\n", ref_nb); printf("Number of query points : %6d\n", query_nb); printf("Dimension of points : %4d\n", dim); printf("Number of neighbors to consider : %4d\n", k); printf("Processing kNN search :"); // Call kNN search CUDA hipEventRecord(start, 0); for (i = 0; i<iterations; i++) knn(ref, ref_nb, query, query_nb, dim, k, dist); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000)); // Destroy cuda event object and free memory hipEventDestroy(start); hipEventDestroy(stop); free(dist); free(query); free(ref); } #endif
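Taken together, cuComputeNorm, the cublasSgemm call with alpha = -2, cuAddRNorm and cuAddQNormAndSqrt evaluate the standard decomposition d(q, r) = sqrt(||q||^2 + ||r||^2 - 2 * q.r) before cuInsertionSort keeps the k smallest values per query column. A small host-side reference of that formula, useful for spot-checking a few (query, reference) pairs against the GPU output, is sketched below; it is an illustration of the decomposition, not code from the original project:

#include <cmath>

// Component d of point p is stored at data[d * nb_points + p], matching the
// cudaMemcpy2D layout and the mat[i * pitch + xIndex] indexing in cuComputeNorm.
float knn_distance_ref(const float *ref, int ref_nb,
                       const float *query, int query_nb,
                       int dim, int r, int q)
{
    float ref_norm = 0.0f, query_norm = 0.0f, dot = 0.0f;
    for (int d = 0; d < dim; d++) {
        float rv = ref[d * ref_nb + r];      // what cuComputeNorm squares for R
        float qv = query[d * query_nb + q];  // what cuComputeNorm squares for Q
        ref_norm   += rv * rv;
        query_norm += qv * qv;
        dot        += rv * qv;               // the -2 * Q * R^T term from cublasSgemm
    }
    // cuAddRNorm adds ||r||^2; cuAddQNormAndSqrt adds ||q||^2 and takes the root.
    return sqrtf(query_norm + ref_norm - 2.0f * dot);
}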
3dca8a4348b2b5e51a770bce699353f31f62de8f.cu
/** * * Date 11 june 2009 * ==== * * Authors Vincent Garcia * ======= Eric Debreuve * Michel Barlaud * * Description Given a reference point set and a query point set, the program returns * =========== the distance between each query point and its k-th nearest neighbor in * the reference point set. Only the distance is provided. The computation * is performed using the API NVIDIA CUDA. * * Paper Fast k nearest neighbor search using GPU * ===== * * BibTeX @INPROCEEDINGS{2008_garcia_cvgpu, * ====== author = {V. Garcia and E. Debreuve and M. Barlaud}, * title = {Fast k nearest neighbor search using GPU}, * booktitle = {CVPR Workshop on Computer Vision on GPU}, * year = {2008}, * address = {Anchorage, Alaska, USA}, * month = {June} * } * */ // If the code is used in Matlab, set MATLAB_CODE to 1. Otherwise, set MATLAB_CODE to 0. #define MATLAB_CODE 0 // Includes #include <stdio.h> #include <cmath> #include <algorithm> #include <cuda.h> #include <cublas.h> #if MATLAB_CODE == 1 #include "mex.h" #else #include <time.h> #endif // Constants used by the program #define MAX_PITCH_VALUE_IN_BYTES 262144 #define MAX_TEXTURE_WIDTH_IN_BYTES 65536 #define MAX_TEXTURE_HEIGHT_IN_BYTES 32768 #define MAX_PART_OF_FREE_MEMORY_USED 0.9 #define BLOCK_DIM 16 //-----------------------------------------------------------------------------------------------// // KERNELS // //-----------------------------------------------------------------------------------------------// /** * Given a matrix of size width*height, compute the square norm of each column. * * @param mat : the matrix * @param width : the number of columns for a colum major storage matrix * @param height : the number of rowm for a colum major storage matrix * @param norm : the vector containing the norm of the matrix */ __global__ void cuComputeNorm(float *mat, int width, int pitch, int height, float *norm){ unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ float val, sum = 0; int i; for (i = 0; i<height; i++){ val = mat[i*pitch + xIndex]; sum += val*val; } norm[xIndex] = sum; } } /** * Given the distance matrix of size width*height, adds the column vector * of size 1*height to each column of the matrix. * * @param dist : the matrix * @param width : the number of columns for a colum major storage matrix * @param pitch : the pitch in number of column * @param height : the number of rowm for a colum major storage matrix * @param vec : the vector to be added */ __global__ void cuAddRNorm(float *dist, int width, int pitch, int height, float *vec){ unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int xIndex = blockIdx.x * blockDim.x + tx; unsigned int yIndex = blockIdx.y * blockDim.y + ty; __shared__ float shared_vec[16]; if (tx == 0 && yIndex<height) shared_vec[ty] = vec[yIndex]; __syncthreads(); if (xIndex<width && yIndex<height) dist[yIndex*pitch + xIndex] += shared_vec[ty]; } /** * Given two row vectors with width column, adds the two vectors and compute * the square root of the sum. The result is stored in the first vector. * * @param vec1 : the first vector * @param vec2 : the second vector * @param width : the number of columns for a colum major storage matrix */ __global__ void cuAddQNormAndSqrt(float *vec1, float *vec2, int width){ unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ vec1[xIndex] = sqrt(vec1[xIndex] + vec2[xIndex]); } } /** * Gathers k-th smallest distances for each column of the distance matrix in the top. 
* * @param dist distance matrix * @param width width of the distance matrix * @param pitch pitch of the distance matrix given in number of columns * @param height height of the distance matrix * @param k number of smallest distance to consider */ __global__ void cuInsertionSort(float *dist, int width, int pitch, int height, int k){ // Variables int l, i, j; float *p; float v, max_value; unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x; if (xIndex<width){ // Pointer shift and max value p = dist + xIndex; max_value = *p; // Part 1 : sort kth firt element for (l = pitch; l<k*pitch; l += pitch){ v = *(p + l); if (v<max_value){ i = 0; while (i<l && *(p + i) <= v) i += pitch; for (j = l; j>i; j -= pitch) *(p + j) = *(p + j - pitch); *(p + i) = v; } max_value = *(p + l); } // Part 2 : insert element in the k-th first lines for (l = k*pitch; l<height*pitch; l += pitch){ v = *(p + l); if (v<max_value){ i = 0; while (i<k*pitch && *(p + i) <= v) i += pitch; for (j = (k - 1)*pitch; j>i; j -= pitch) *(p + j) = *(p + j - pitch); *(p + i) = v; max_value = *(p + (k - 1)*pitch); } } } } //-----------------------------------------------------------------------------------------------// // K-th NEAREST NEIGHBORS // //-----------------------------------------------------------------------------------------------// /** * Prints the error message return during the memory allocation. * * @param error error value return by the memory allocation function * @param memorySize size of memory tried to be allocated */ void printErrorMessage(cudaError_t error, int memorySize){ printf("==================================================\n"); printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error)); printf("Whished allocated memory : %d\n", memorySize); printf("==================================================\n"); #if MATLAB_CODE == 1 mexErrMsgTxt("CUDA ERROR DURING MEMORY ALLOCATION"); #endif } /** * K nearest neighbor algorithm * - Initialize CUDA * - Allocate device memory * - Copy point sets (reference and query points) from host to device memory * - Compute the distance to the k-th nearest neighbor for each query point * - Copy distances from device to host memory * * @param ref_host reference points ; pointer to linear matrix * @param ref_width number of reference points ; width of the matrix * @param query_host query points ; pointer to linear matrix * @param query_width number of query points ; width of the matrix * @param height dimension of points ; height of the matrices * @param k number of neighbor to consider * @param dist_host distances to k-th nearest neighbor ; pointer to linear matrix * */ void knn(float* ref_host, int ref_width, float* query_host, int query_width, int height, int k, float* dist_host){ unsigned int size_of_float = sizeof(float); // Variables float *dist_dev; float *query_dev; float *ref_dev; float *query_norm; float *ref_norm; size_t query_pitch; size_t query_pitch_in_bytes; size_t ref_pitch; size_t ref_pitch_in_bytes; size_t max_nb_query_traited; size_t actual_nb_query_width; unsigned int memory_total; unsigned int memory_free; cudaError_t result; // CUDA Initialisation cuInit(0); cublasInit(); // Check free memory using driver API ; only (MAX_PART_OF_FREE_MEMORY_USED*100)% of memory will be used CUcontext cuContext; CUdevice cuDevice = 0; cuCtxCreate(&cuContext, 0, cuDevice); cuMemGetInfo(&memory_free, &memory_total); cuCtxDetach(cuContext); // Determine maximum number of query that can be treated max_nb_query_traited = (memory_free * MAX_PART_OF_FREE_MEMORY_USED 
- size_of_float * ref_width * (height + 1)) / (size_of_float * (height + ref_width + 1)); max_nb_query_traited = min(query_width, (max_nb_query_traited / 16) * 16); // Allocation of global memory for query points, ||query||, and for 2.R^T.Q result = cudaMallocPitch((void **)&query_dev, &query_pitch_in_bytes, max_nb_query_traited * size_of_float, (height + ref_width + 1)); if (result){ printErrorMessage(result, max_nb_query_traited * size_of_float * (height + ref_width + 1)); return; } query_pitch = query_pitch_in_bytes / size_of_float; query_norm = query_dev + height * query_pitch; dist_dev = query_norm + query_pitch; // Allocation of global memory for reference points and ||query|| result = cudaMallocPitch((void **)&ref_dev, &ref_pitch_in_bytes, ref_width * size_of_float, height + 1); if (result){ printErrorMessage(result, ref_width * size_of_float * (height + 1)); cudaFree(query_dev); return; } ref_pitch = ref_pitch_in_bytes / size_of_float; ref_norm = ref_dev + height * ref_pitch; // Memory copy of ref_host in ref_dev result = cudaMemcpy2D(ref_dev, ref_pitch_in_bytes, ref_host, ref_width*size_of_float, ref_width*size_of_float, height, cudaMemcpyHostToDevice); // Computation of reference square norm dim3 G_ref_norm(ref_width / 256, 1, 1); dim3 T_ref_norm(256, 1, 1); if (ref_width % 256 != 0) G_ref_norm.x += 1; cuComputeNorm << <G_ref_norm, T_ref_norm >> >(ref_dev, ref_width, ref_pitch, height, ref_norm); // Main loop: split queries to fit in GPU memory for (int i = 0; i<query_width; i += max_nb_query_traited){ // Nomber of query points actually used actual_nb_query_width = min(max_nb_query_traited, query_width - i); // Memory copy of ref_host in ref_dev cudaMemcpy2D(query_dev, query_pitch_in_bytes, &query_host[i], query_width*size_of_float, actual_nb_query_width*size_of_float, height, cudaMemcpyHostToDevice); // Computation of Q square norm dim3 G_query_norm(actual_nb_query_width / 256, 1, 1); dim3 T_query_norm(256, 1, 1); if (actual_nb_query_width % 256 != 0) G_query_norm.x += 1; cuComputeNorm << <G_query_norm, T_query_norm >> >(query_dev, actual_nb_query_width, query_pitch, height, query_norm); // Computation of Q*transpose(R) cublasSgemm('n', 't', (int)query_pitch, (int)ref_pitch, height, (float)-2.0, query_dev, query_pitch, ref_dev, ref_pitch, (float)0.0, dist_dev, query_pitch); // Add R norm to distances dim3 grid(actual_nb_query_width / 16, ref_width / 16, 1); dim3 thread(16, 16, 1); if (actual_nb_query_width % 16 != 0) grid.x += 1; if (ref_width % 16 != 0) grid.y += 1; cuAddRNorm << <grid, thread >> >(dist_dev, actual_nb_query_width, query_pitch, ref_width, ref_norm); // Sort each column cuInsertionSort << <G_query_norm, T_query_norm >> >(dist_dev, actual_nb_query_width, query_pitch, ref_width, k); // Add Q norm and compute Sqrt ONLY ON ROW K-1 cuAddQNormAndSqrt << <G_query_norm, T_query_norm >> >(dist_dev + (k - 1)*query_pitch, query_norm, actual_nb_query_width); // Memory copy cudaMemcpy2D(&dist_host[i], query_width*size_of_float, dist_dev + (k - 1)*query_pitch, query_pitch_in_bytes, actual_nb_query_width*size_of_float, 1, cudaMemcpyDeviceToHost); } // Free memory cudaFree(ref_dev); cudaFree(query_dev); // CUBLAS shutdown cublasShutdown(); } //-----------------------------------------------------------------------------------------------// // MATLAB INTERFACES & C EXAMPLE // //-----------------------------------------------------------------------------------------------// #if MATLAB_CODE == 1 /** * Interface to use CUDA code in Matlab (gateway routine). 
* * @param nlhs Number of expected mxArrays (Left Hand Side) * @param plhs Array of pointers to expected outputs * @param nrhs Number of inputs (Right Hand Side) * @param prhs Array of pointers to input data. The input data is read-only and should not be altered by your mexFunction . */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){ // Variables float* ref; int ref_width; int ref_height; float* query; int query_width; int query_height; float* dist; int k; // Reference points ref = (float *)mxGetData(prhs[0]); ref_width = mxGetM(prhs[0]); ref_height = mxGetN(prhs[0]); // Query points query = (float *)mxGetData(prhs[1]); query_width = mxGetM(prhs[1]); query_height = mxGetN(prhs[1]); // Number of neighbors to consider k = (int)mxGetScalar(prhs[2]); // Verification of the reference point and query point sizes if (ref_height != query_height) mexErrMsgTxt("Data must have the same dimension"); if (ref_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES) mexErrMsgTxt("Reference number is too large for CUDA (Max=65536)"); if (query_width*sizeof(float)>MAX_PITCH_VALUE_IN_BYTES) mexErrMsgTxt("Query number is too large for CUDA (Max=65536)"); // Allocation of dist array dist = (float *)mxGetPr(plhs[0] = mxCreateNumericMatrix(query_width, 1, mxSINGLE_CLASS, mxREAL)); // Call KNN CUDA knn(ref, ref_width, query, query_width, ref_height, k, dist); } #else // C code /** * Example of use of kNN search CUDA. */ int main(void){ // Variables and parameters float* ref; // Pointer to reference point array float* query; // Pointer to query point array float* dist; // Pointer to distance array int ref_nb = 4096; // Reference point number, max=65535 int query_nb = 4096; // Query point number, max=65535 int dim = 32; // Dimension of points, max=8192 int k = 20; // Nearest neighbors to consider int iterations = 100; int i; // Memory allocation ref = (float *)malloc(ref_nb * dim * sizeof(float)); query = (float *)malloc(query_nb * dim * sizeof(float)); dist = (float *)malloc(query_nb * sizeof(float)); // Init srand(time(NULL)); for (i = 0; i<ref_nb * dim; i++) ref[i] = (float)rand() / (float)RAND_MAX; for (i = 0; i<query_nb * dim; i++) query[i] = (float)rand() / (float)RAND_MAX; // Variables for duration evaluation cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float elapsed_time; // Display informations printf("Number of reference points : %6d\n", ref_nb); printf("Number of query points : %6d\n", query_nb); printf("Dimension of points : %4d\n", dim); printf("Number of neighbors to consider : %4d\n", k); printf("Processing kNN search :"); // Call kNN search CUDA cudaEventRecord(start, 0); for (i = 0; i<iterations; i++) knn(ref, ref_nb, query, query_nb, dim, k, dist); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000)); // Destroy cuda event object and free memory cudaEventDestroy(start); cudaEventDestroy(stop); free(dist); free(query); free(ref); } #endif
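The kernels above exploit the expansion ||q - r||^2 = ||q||^2 - 2 q·r + ||r||^2: cublasSgemm produces the -2·QᵀR term, cuComputeNorm / cuAddRNorm / cuAddQNormAndSqrt supply the two norm terms, and cuInsertionSort keeps the k smallest entries of each column. A small CPU reference for the same output (distance from each query point to its k-th nearest reference point) is handy for validating dist_host. This is a sketch, not part of the original sources; the function name is illustrative and the layout is the one implied by the cudaMemcpy2D calls above, i.e. component d of point j stored at data[d * n_points + j].

// CPU reference for the k-th nearest neighbor distance (validation sketch).
// Layout matches the GPU path: component d of point j is at data[d * n + j].
#include <cmath>
#include <vector>
#include <algorithm>

void knn_cpu_reference(const float* ref, int ref_nb,
                       const float* query, int query_nb,
                       int dim, int k, float* dist)
{
    std::vector<float> d2(ref_nb);
    for (int q = 0; q < query_nb; ++q) {
        // Squared distance from query q to every reference point.
        for (int r = 0; r < ref_nb; ++r) {
            float acc = 0.0f;
            for (int d = 0; d < dim; ++d) {
                float diff = query[d * query_nb + q] - ref[d * ref_nb + r];
                acc += diff * diff;
            }
            d2[r] = acc;
        }
        // k-th smallest squared distance (k >= 1), then take the square root,
        // matching what the GPU path stores in row k-1 of the distance matrix.
        std::nth_element(d2.begin(), d2.begin() + (k - 1), d2.end());
        dist[q] = std::sqrt(d2[k - 1]);
    }
}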
6d2fe3224fd49f424a1eb39d22fd31902855bbcc.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <util.h> int main (int argc, char * argv[]) { int n_dev; hipGetDeviceCount(& n_dev); printf("%d devices are available\n", n_dev); if (n_dev <= 0) return 1; printf("Checking state of primary context -- before first CUDA runtime API call\n"); check_primary_ctx(n_dev); hipError_t ierr; int i_dev, is_primary, is_clean; ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); printf("Checking state of primary context -- after saxpy on each device sequentially\n"); int M = 1000; int * h_x = (int *) malloc(M*sizeof(int)); int * h_y = (int *) calloc(M, sizeof(int)); for (int i=0; i<M; i++) h_x[i] = 10; int cu_dev = 1; hipCtx_t context; hipDevice_t device; hipDeviceGet(& device, cu_dev); hipCtxCreate(& context, 0, device); hipCtxPushCurrent(context); ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); for (int i=0; i<n_dev; i++) { if (i != cu_dev) hipSetDevice(i); int * d_x, * d_y; api_malloc(& d_x, M); api_malloc(& d_y, M); api_HtoD(h_x, d_x, M); api_HtoD(h_y, d_y, M); hipLaunchKernelGGL(( saxpy_int), dim3((M+255)/256), dim3(256), 0, 0, M, 1, d_x, d_y); api_DtoH(h_x, d_x, M); api_DtoH(h_y, d_y, M); hipFree(d_x); hipFree(d_y); printf("Device %d work result: %d\n", i, h_y[0]); } // ierr = hipCtxGetDevice(& device); // printf("%d, %d\n", device, ierr); ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); hipSetDevice(0); check_primary_ctx(n_dev); free(h_x); free(h_y); return 0; }
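The context handling above uses HIP's driver-style hipCtx* API, which is provided mainly for code ported from the CUDA driver API and is marked deprecated in recent HIP releases. A minimal self-contained sketch of the create/push/pop/destroy sequence on a chosen device ordinal, with error handling omitted (the function name is illustrative):

// Sketch of HIP driver-style context management mirroring the setup above.
#include <hip/hip_runtime.h>

void push_context_on_device(int ordinal)
{
    hipInit(0);                           // safe to call more than once

    hipDevice_t device;
    hipCtx_t context;

    hipDeviceGet(&device, ordinal);       // pick a device by ordinal
    hipCtxCreate(&context, 0, device);    // create a non-primary context
    hipCtxPushCurrent(context);           // make it current on this thread

    // ... work issued here runs in `context` ...

    hipCtxPopCurrent(&context);           // restore the previous context
    hipCtxDestroy(context);
}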
6d2fe3224fd49f424a1eb39d22fd31902855bbcc.cu
#include <cuda.h> #include <stdio.h> #include <util.h> int main (int argc, char * argv[]) { int n_dev; cudaGetDeviceCount(& n_dev); printf("%d devices are available\n", n_dev); if (n_dev <= 0) return 1; printf("Checking state of primary context -- before first CUDA runtime API call\n"); check_primary_ctx(n_dev); CUresult ierr; int i_dev, is_primary, is_clean; ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); printf("Checking state of primary context -- after saxpy on each device sequentially\n"); int M = 1000; int * h_x = (int *) malloc(M*sizeof(int)); int * h_y = (int *) calloc(M, sizeof(int)); for (int i=0; i<M; i++) h_x[i] = 10; int cu_dev = 1; CUcontext context; CUdevice device; cuDeviceGet(& device, cu_dev); cuCtxCreate(& context, 0, device); cuCtxPushCurrent(context); ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); for (int i=0; i<n_dev; i++) { if (i != cu_dev) cudaSetDevice(i); int * d_x, * d_y; api_malloc(& d_x, M); api_malloc(& d_y, M); api_HtoD(h_x, d_x, M); api_HtoD(h_y, d_y, M); saxpy_int<<<(M+255)/256, 256>>>(M, 1, d_x, d_y); api_DtoH(h_x, d_x, M); api_DtoH(h_y, d_y, M); cudaFree(d_x); cudaFree(d_y); printf("Device %d work result: %d\n", i, h_y[0]); } // ierr = cuCtxGetDevice(& device); // printf("%d, %d\n", device, ierr); ierr = get_current_device(& i_dev, & is_primary, & is_clean); printf("%d %d %d\n", i_dev, is_primary, is_clean); cudaSetDevice(0); check_primary_ctx(n_dev); free(h_x); free(h_y); return 0; }
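check_primary_ctx and get_current_device come from util.h, which is not part of this dump. A hypothetical sketch of what the primary-context check could look like if it is built on the driver call cuDevicePrimaryCtxGetState; the helper name and the output format here are illustrative only:

// Hypothetical reconstruction of a primary-context state check (sketch only).
#include <cuda.h>
#include <stdio.h>

void check_primary_ctx_sketch(int n_dev)
{
    cuInit(0);  // driver API must be initialized; safe to call repeatedly
    for (int i = 0; i < n_dev; ++i) {
        CUdevice dev;
        unsigned int flags = 0;
        int active = 0;
        cuDeviceGet(&dev, i);
        cuDevicePrimaryCtxGetState(dev, &flags, &active);
        printf("device %d: primary context %s (flags 0x%x)\n",
               i, active ? "active" : "inactive", flags);
    }
}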
e5970fe490a1959d35b511b2a307efc1212df6b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include "finite_difference.h" void get_stencils_values(fd_container *fd_container, float *ddx, float *ddy, float dx, float dy, int NY, int NX, int NY_STAG, int NX_STAG, int z) { for(int j=0; j<NY; j++) { int jp1 = minf(j+1, NY-1); int jm1 = maxf(j-1, 0); for(int i=0; i<NX; i++) { int ip1 = minf(i+1, NX-1); int im1 = maxf(i-1, 0); float dsx = (ip1-im1)*dx; float dsy = (jp1-jm1)*dy; // Store the stencils for dv/dx fd_container->stencils_val[((j*NX)+i)*10] = dsx; fd_container->stencils_val[(((j*NX)+i)*10)+1] = ddx[(z*(NY_STAG*NX))+((j*NX)+ip1)]; fd_container->stencils_val[(((j*NX)+i)*10)+2] = ddx[(z*(NY_STAG*NX))+(((j+1)*NX)+ip1)]; fd_container->stencils_val[(((j*NX)+i)*10)+3] = ddx[(z*(NY_STAG*NX))+((j*NX)+im1)]; fd_container->stencils_val[(((j*NX)+i)*10)+4] = ddx[(z*(NY_STAG*NX))+(((j+1)*NX)+im1)]; // Store the stencils for du/dy fd_container->stencils_val[(((j*NX)+i)*10)+5] = dsy; fd_container->stencils_val[(((j*NX)+i)*10)+6] = ddy[(z*(NY*NX_STAG))+((jp1*NX_STAG)+i)]; fd_container->stencils_val[(((j*NX)+i)*10)+7] = ddy[(z*(NY*NX_STAG))+((jp1*NX_STAG)+i+1)]; fd_container->stencils_val[(((j*NX)+i)*10)+8] = ddy[(z*(NY*NX_STAG))+((jm1*NX_STAG)+i)]; fd_container->stencils_val[(((j*NX)+i)*10)+9] = ddy[(z*(NY*NX_STAG))+((jm1*NX_STAG)+i+1)]; } } } #ifdef __NVCC__ __device__ void get_rel_vert_vort(fd_container *fd_container, int idx, float *dv_dx, float *du_dy) { float dx = fd_container->stencils_val[(idx*10)]; float v1_1 = fd_container->stencils_val[(idx*10)+1]; float v1_2 = fd_container->stencils_val[(idx*10)+2]; float v2_1 = fd_container->stencils_val[(idx*10)+3]; float v2_2 = fd_container->stencils_val[(idx*10)+4]; float dy = fd_container->stencils_val[(idx*10)+5]; float u1_1 = fd_container->stencils_val[(idx*10)+6]; float u1_2 = fd_container->stencils_val[(idx*10)+7]; float u2_1 = fd_container->stencils_val[(idx*10)+8]; float u2_2 = fd_container->stencils_val[(idx*10)+9]; *dv_dx = 0.5f * (v1_1 + v1_2 - v2_1 - v2_2) / dx; *du_dy = 0.5f * (u1_1 + u1_2 - u2_1 - u2_2) / dy; } __global__ void gpu_compute_rel_vert_vort(fd_container *fd_container, const int NY, const int NX, float scaling_factor) { int IDX = blockIdx.x * blockDim.x + threadIdx.x; if (IDX >= (NY*NX)) return; float dv_dx = 0.0f; float du_dy = 0.0f; get_rel_vert_vort(fd_container, IDX, &dv_dx, &du_dy); fd_container->val[IDX] = (dv_dx - du_dy)*scaling_factor; } __global__ void gpu_compute_abs_vert_vort(fd_container *fd_container, const int NY, const int NX, float scaling_factor) { int IDX = blockIdx.x * blockDim.x + threadIdx.x; if (IDX >= (NY*NX)) return; float dv_dx = 0.0f; float du_dy = 0.0f; get_rel_vert_vort(fd_container, IDX, &dv_dx, &du_dy); float earth_angular_velocity = 7.2921e-5; // rad/s float rad_lat = fd_container->buffer[IDX] * M_PI/180.0f; float f = 2.0f*earth_angular_velocity*sinf(rad_lat); fd_container->val[IDX] = (f + (dv_dx - du_dy))*scaling_factor; } #endif
e5970fe490a1959d35b511b2a307efc1212df6b0.cu
#include <math.h> #include "finite_difference.h" void get_stencils_values(fd_container *fd_container, float *ddx, float *ddy, float dx, float dy, int NY, int NX, int NY_STAG, int NX_STAG, int z) { for(int j=0; j<NY; j++) { int jp1 = minf(j+1, NY-1); int jm1 = maxf(j-1, 0); for(int i=0; i<NX; i++) { int ip1 = minf(i+1, NX-1); int im1 = maxf(i-1, 0); float dsx = (ip1-im1)*dx; float dsy = (jp1-jm1)*dy; // Store the stencils for dv/dx fd_container->stencils_val[((j*NX)+i)*10] = dsx; fd_container->stencils_val[(((j*NX)+i)*10)+1] = ddx[(z*(NY_STAG*NX))+((j*NX)+ip1)]; fd_container->stencils_val[(((j*NX)+i)*10)+2] = ddx[(z*(NY_STAG*NX))+(((j+1)*NX)+ip1)]; fd_container->stencils_val[(((j*NX)+i)*10)+3] = ddx[(z*(NY_STAG*NX))+((j*NX)+im1)]; fd_container->stencils_val[(((j*NX)+i)*10)+4] = ddx[(z*(NY_STAG*NX))+(((j+1)*NX)+im1)]; // Store the stencils for du/dy fd_container->stencils_val[(((j*NX)+i)*10)+5] = dsy; fd_container->stencils_val[(((j*NX)+i)*10)+6] = ddy[(z*(NY*NX_STAG))+((jp1*NX_STAG)+i)]; fd_container->stencils_val[(((j*NX)+i)*10)+7] = ddy[(z*(NY*NX_STAG))+((jp1*NX_STAG)+i+1)]; fd_container->stencils_val[(((j*NX)+i)*10)+8] = ddy[(z*(NY*NX_STAG))+((jm1*NX_STAG)+i)]; fd_container->stencils_val[(((j*NX)+i)*10)+9] = ddy[(z*(NY*NX_STAG))+((jm1*NX_STAG)+i+1)]; } } } #ifdef __NVCC__ __device__ void get_rel_vert_vort(fd_container *fd_container, int idx, float *dv_dx, float *du_dy) { float dx = fd_container->stencils_val[(idx*10)]; float v1_1 = fd_container->stencils_val[(idx*10)+1]; float v1_2 = fd_container->stencils_val[(idx*10)+2]; float v2_1 = fd_container->stencils_val[(idx*10)+3]; float v2_2 = fd_container->stencils_val[(idx*10)+4]; float dy = fd_container->stencils_val[(idx*10)+5]; float u1_1 = fd_container->stencils_val[(idx*10)+6]; float u1_2 = fd_container->stencils_val[(idx*10)+7]; float u2_1 = fd_container->stencils_val[(idx*10)+8]; float u2_2 = fd_container->stencils_val[(idx*10)+9]; *dv_dx = 0.5f * (v1_1 + v1_2 - v2_1 - v2_2) / dx; *du_dy = 0.5f * (u1_1 + u1_2 - u2_1 - u2_2) / dy; } __global__ void gpu_compute_rel_vert_vort(fd_container *fd_container, const int NY, const int NX, float scaling_factor) { int IDX = blockIdx.x * blockDim.x + threadIdx.x; if (IDX >= (NY*NX)) return; float dv_dx = 0.0f; float du_dy = 0.0f; get_rel_vert_vort(fd_container, IDX, &dv_dx, &du_dy); fd_container->val[IDX] = (dv_dx - du_dy)*scaling_factor; } __global__ void gpu_compute_abs_vert_vort(fd_container *fd_container, const int NY, const int NX, float scaling_factor) { int IDX = blockIdx.x * blockDim.x + threadIdx.x; if (IDX >= (NY*NX)) return; float dv_dx = 0.0f; float du_dy = 0.0f; get_rel_vert_vort(fd_container, IDX, &dv_dx, &du_dy); float earth_angular_velocity = 7.2921e-5; // rad/s float rad_lat = fd_container->buffer[IDX] * M_PI/180.0f; float f = 2.0f*earth_angular_velocity*sinf(rad_lat); fd_container->val[IDX] = (f + (dv_dx - du_dy))*scaling_factor; } #endif
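get_rel_vert_vort evaluates centered differences from the pre-gathered stencil values, giving the relative vertical vorticity dv/dx - du/dy; the absolute variant adds the Coriolis parameter f = 2*omega*sin(lat). A CPU cross-check that consumes the same 10-float stencil layout written by get_stencils_values can be used to validate the kernel output. This is a sketch, not part of the original sources, and assumes stencils_val and val are host-visible copies of the corresponding fd_container arrays.

// CPU cross-check for the relative vertical vorticity kernel (sketch).
void cpu_rel_vert_vort(const float* stencils_val, float* val,
                       int NY, int NX, float scaling_factor)
{
    for (int idx = 0; idx < NY * NX; ++idx) {
        const float* s = &stencils_val[idx * 10];
        float dsx = s[0];
        float dsy = s[5];
        // dv/dx and du/dy from the four gathered neighbours on each side,
        // exactly as in get_rel_vert_vort.
        float dv_dx = 0.5f * (s[1] + s[2] - s[3] - s[4]) / dsx;
        float du_dy = 0.5f * (s[6] + s[7] - s[8] - s[9]) / dsy;
        val[idx] = (dv_dx - du_dy) * scaling_factor;
    }
}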
01aa67b0352e4d0f496739d52d777af9e36a76f8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "uplo_lgamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; hipMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; hipMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( uplo_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( uplo_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( uplo_lgamma), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
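hipify rewrites CUDA's triple-chevron launches into hipLaunchKernelGGL, as in the benchmark above; the two forms are equivalent, with the grid, block, dynamic shared-memory size and stream made explicit arguments. A minimal sketch of the correspondence (scale_kernel is a placeholder kernel, not part of the original sources); hipcc also still accepts the <<<...>>> form:

// Both launch syntaxes side by side in HIP.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(int n, float a, float* x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_both_ways(int n, float a, float* d_x)   // d_x: device pointer
{
    dim3 grid((n + 255) / 256), block(256);

    // Form emitted by hipify: grid, block, dynamic shared-memory bytes and
    // stream come before the kernel arguments.
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, 0, n, a, d_x);

    // Triple-chevron form, equivalent to the call above.
    scale_kernel<<<grid, block, 0, 0>>>(n, a, d_x);
}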
01aa67b0352e4d0f496739d52d777af9e36a76f8.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "uplo_lgamma.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int sd = 1; const int unit = 1; const int bottom = 1; const REAL *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); const int offset_a = 1; const int ld_a = 1; REAL *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); const int offset_b = 1; const int ld_b = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); uplo_lgamma<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { uplo_lgamma<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { uplo_lgamma<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
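One caveat when reading the timings produced above: kernel launches are asynchronous, so a steady_clock timestamp taken right after the launch loop may not include the kernels' execution time. A sketch of the same measurement done with CUDA events, which are recorded in-stream and synchronized before the elapsed time is read; the kernel launch itself is elided and would go inside the loop:

// Event-based timing of a launch loop (sketch; the launch is a placeholder).
#include <cuda_runtime.h>

float time_launch_loop_ms(int iterations)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    for (int i = 0; i < iterations; ++i) {
        // my_kernel<<<gridBlock, threadBlock>>>(...);   // benchmarked launch
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);     // wait until the recorded work has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}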
baf04d570f8364d0e2bdd2c49ecc2ec0dd32fe41.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Full license terms provided in LICENSE.md file. */ #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <NvInfer.h> #include <opencv2/opencv.hpp> #include "examples/classify_image/utils.h" using namespace std; using namespace nvinfer1; class Logger : public ILogger { void log(Severity severity, const char * msg) override { if (severity != Severity::kINFO) cout << msg << endl; } } gLogger; /** * image_file: path to image * plan_file: path of the serialized engine file * label_file: file with <class_name> per line * input_name: name of the input tensor * output_name: name of the output tensor * preprocessing_fn: 'vgg' or 'inception' */ int main(int argc, char *argv[]) { if (argc != 7) { cout << "Usage: classify_image <image_file> <plan_file> <label_file> <input_name> <output_name> <preprocessing_fn>\n"; return 0; } string imageFilename = argv[1]; string planFilename = argv[2]; string labelFilename = argv[3]; string inputName = argv[4]; string outputName = argv[5]; string preprocessingFn = argv[6]; /* load the engine */ cout << "Loading TensorRT engine from plan file..." << endl; ifstream planFile(planFilename); if (!planFile.is_open()) { cout << "Could not open plan file." << endl; return 1; } stringstream planBuffer; planBuffer << planFile.rdbuf(); string plan = planBuffer.str(); IRuntime *runtime = createInferRuntime(gLogger); ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr); IExecutionContext *context = engine->createExecutionContext(); /* get the input / output dimensions */ int inputBindingIndex, outputBindingIndex; inputBindingIndex = engine->getBindingIndex(inputName.c_str()); outputBindingIndex = engine->getBindingIndex(outputName.c_str()); if (inputBindingIndex < 0) { cout << "Invalid input name." << endl; return 1; } if (outputBindingIndex < 0) { cout << "Invalid output name." << endl; return 1; } Dims inputDims, outputDims; inputDims = engine->getBindingDimensions(inputBindingIndex); outputDims = engine->getBindingDimensions(outputBindingIndex); int inputWidth, inputHeight; inputHeight = inputDims.d[1]; inputWidth = inputDims.d[2]; /* read image, convert color, and resize */ cout << "Preprocessing input..." << endl; // cv::Mat image = cv::imread(imageFilename, CV_LOAD_IMAGE_COLOR); cv::Mat image = cv::imread(imageFilename, cv::IMREAD_COLOR); if (image.data == NULL) { cout << "Could not read image from file." << endl; return 1; } cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3); cv::resize(image, image, cv::Size(inputWidth, inputHeight)); /* convert from uint8+NHWC to float+NCHW */ float *inputDataHost, *outputDataHost; size_t numInput, numOutput; numInput = numTensorElements(inputDims); numOutput = numTensorElements(outputDims); inputDataHost = (float*) malloc(numInput * sizeof(float)); outputDataHost = (float*) malloc(numOutput * sizeof(float)); cvImageToTensor(image, inputDataHost, inputDims); if (preprocessingFn == "vgg") preprocessVgg(inputDataHost, inputDims); else if (preprocessingFn == "inception") preprocessInception(inputDataHost, inputDims); else { cout << "Invalid preprocessing function argument, must be vgg or inception. 
\n" << endl; return 1; } /* transfer to device */ float *inputDataDevice, *outputDataDevice; hipMalloc(&inputDataDevice, numInput * sizeof(float)); hipMalloc(&outputDataDevice, numOutput * sizeof(float)); hipMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), hipMemcpyHostToDevice); void *bindings[2]; bindings[inputBindingIndex] = (void*) inputDataDevice; bindings[outputBindingIndex] = (void*) outputDataDevice; /* execute engine */ cout << "Executing inference engine..." << endl; const int kBatchSize = 1; context->execute(kBatchSize, bindings); /* transfer output back to host */ hipMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), hipMemcpyDeviceToHost); /* parse output */ vector<size_t> sortedIndices = argsort(outputDataHost, outputDims); cout << "\nThe top-5 indices are: "; for (int i = 0; i < 5; i++) cout << sortedIndices[i] << " "; ifstream labelsFile(labelFilename); if (!labelsFile.is_open()) { cout << "\nCould not open label file." << endl; return 1; } vector<string> labelMap; string label; while(getline(labelsFile, label)) { labelMap.push_back(label); } cout << "\nWhich corresponds to class labels: "; for (int i = 0; i < 5; i++) cout << endl << i << ". " << labelMap[sortedIndices[i]]; cout << endl; /* clean up */ runtime->destroy(); engine->destroy(); context->destroy(); free(inputDataHost); free(outputDataHost); hipFree(inputDataDevice); hipFree(outputDataDevice); return 0; }
baf04d570f8364d0e2bdd2c49ecc2ec0dd32fe41.cu
/** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * Full license terms provided in LICENSE.md file. */ #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <NvInfer.h> #include <opencv2/opencv.hpp> #include "examples/classify_image/utils.h" using namespace std; using namespace nvinfer1; class Logger : public ILogger { void log(Severity severity, const char * msg) override { if (severity != Severity::kINFO) cout << msg << endl; } } gLogger; /** * image_file: path to image * plan_file: path of the serialized engine file * label_file: file with <class_name> per line * input_name: name of the input tensor * output_name: name of the output tensor * preprocessing_fn: 'vgg' or 'inception' */ int main(int argc, char *argv[]) { if (argc != 7) { cout << "Usage: classify_image <image_file> <plan_file> <label_file> <input_name> <output_name> <preprocessing_fn>\n"; return 0; } string imageFilename = argv[1]; string planFilename = argv[2]; string labelFilename = argv[3]; string inputName = argv[4]; string outputName = argv[5]; string preprocessingFn = argv[6]; /* load the engine */ cout << "Loading TensorRT engine from plan file..." << endl; ifstream planFile(planFilename); if (!planFile.is_open()) { cout << "Could not open plan file." << endl; return 1; } stringstream planBuffer; planBuffer << planFile.rdbuf(); string plan = planBuffer.str(); IRuntime *runtime = createInferRuntime(gLogger); ICudaEngine *engine = runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr); IExecutionContext *context = engine->createExecutionContext(); /* get the input / output dimensions */ int inputBindingIndex, outputBindingIndex; inputBindingIndex = engine->getBindingIndex(inputName.c_str()); outputBindingIndex = engine->getBindingIndex(outputName.c_str()); if (inputBindingIndex < 0) { cout << "Invalid input name." << endl; return 1; } if (outputBindingIndex < 0) { cout << "Invalid output name." << endl; return 1; } Dims inputDims, outputDims; inputDims = engine->getBindingDimensions(inputBindingIndex); outputDims = engine->getBindingDimensions(outputBindingIndex); int inputWidth, inputHeight; inputHeight = inputDims.d[1]; inputWidth = inputDims.d[2]; /* read image, convert color, and resize */ cout << "Preprocessing input..." << endl; // cv::Mat image = cv::imread(imageFilename, CV_LOAD_IMAGE_COLOR); cv::Mat image = cv::imread(imageFilename, cv::IMREAD_COLOR); if (image.data == NULL) { cout << "Could not read image from file." << endl; return 1; } cv::cvtColor(image, image, cv::COLOR_BGR2RGB, 3); cv::resize(image, image, cv::Size(inputWidth, inputHeight)); /* convert from uint8+NHWC to float+NCHW */ float *inputDataHost, *outputDataHost; size_t numInput, numOutput; numInput = numTensorElements(inputDims); numOutput = numTensorElements(outputDims); inputDataHost = (float*) malloc(numInput * sizeof(float)); outputDataHost = (float*) malloc(numOutput * sizeof(float)); cvImageToTensor(image, inputDataHost, inputDims); if (preprocessingFn == "vgg") preprocessVgg(inputDataHost, inputDims); else if (preprocessingFn == "inception") preprocessInception(inputDataHost, inputDims); else { cout << "Invalid preprocessing function argument, must be vgg or inception. 
\n" << endl; return 1; } /* transfer to device */ float *inputDataDevice, *outputDataDevice; cudaMalloc(&inputDataDevice, numInput * sizeof(float)); cudaMalloc(&outputDataDevice, numOutput * sizeof(float)); cudaMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), cudaMemcpyHostToDevice); void *bindings[2]; bindings[inputBindingIndex] = (void*) inputDataDevice; bindings[outputBindingIndex] = (void*) outputDataDevice; /* execute engine */ cout << "Executing inference engine..." << endl; const int kBatchSize = 1; context->execute(kBatchSize, bindings); /* transfer output back to host */ cudaMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), cudaMemcpyDeviceToHost); /* parse output */ vector<size_t> sortedIndices = argsort(outputDataHost, outputDims); cout << "\nThe top-5 indices are: "; for (int i = 0; i < 5; i++) cout << sortedIndices[i] << " "; ifstream labelsFile(labelFilename); if (!labelsFile.is_open()) { cout << "\nCould not open label file." << endl; return 1; } vector<string> labelMap; string label; while(getline(labelsFile, label)) { labelMap.push_back(label); } cout << "\nWhich corresponds to class labels: "; for (int i = 0; i < 5; i++) cout << endl << i << ". " << labelMap[sortedIndices[i]]; cout << endl; /* clean up */ runtime->destroy(); engine->destroy(); context->destroy(); free(inputDataHost); free(outputDataHost); cudaFree(inputDataDevice); cudaFree(outputDataDevice); return 0; }
49b7af5f5151ab9231b416c4b63eb75896b219c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "cuda_apsp.cuh" /** * CUDA handle error, if error occurs print message and exit program * * @param error: CUDA error status */ #define HANDLE_ERROR(error) { \ if (error != hipSuccess) { \ fprintf(stderr, "%s in %s at line %d\n", \ hipGetErrorString(error), __FILE__, __LINE__); \ exit(EXIT_FAILURE); \ } \ } \ /** * Naive CUDA kernel implementation algorithm Floyd Wharshall for APSP * check if path from vertex x -> y will be short using vertex u x -> u -> y * for all vertices in graph * * @param u: Index of vertex u * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _naive_fw_kernel(const int u, size_t pitch, const int nvertex, int* const graph, int* const pred) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (y < nvertex && x < nvertex) { int indexYX = y * pitch + x; int indexUX = u * pitch + x; int newPath = graph[y * pitch + u] + graph[indexUX]; int oldPath = graph[indexYX]; if (oldPath > newPath) { graph[indexYX] = newPath; pred[indexYX] = pred[indexUX]; } } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Dependent phase 1 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_dependent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { __shared__ int cacheGraph[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePred[BLOCK_SIZE][BLOCK_SIZE]; const int idx = threadIdx.x; const int idy = threadIdx.y; const int v1 = BLOCK_SIZE * blockId + idy; const int v2 = BLOCK_SIZE * blockId + idx; int newPred; int newPath; const int cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { cacheGraph[idy][idx] = graph[cellId]; cachePred[idy][idx] = pred[cellId]; newPred = cachePred[idy][idx]; } else { cacheGraph[idy][idx] = MAX_DISTANCE; cachePred[idy][idx] = -1; } // Synchronize to make sure the all value are loaded in block __syncthreads(); #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraph[idy][u] + cacheGraph[u][idx]; // Synchronize before calculate new value __syncthreads(); if (newPath < cacheGraph[idy][idx]) { cacheGraph[idy][idx] = newPath; newPred = cachePred[u][idx]; } // Synchronize to make sure that all value are current __syncthreads(); cachePred[idy][idx] = newPred; } if (v1 < nvertex && v2 < nvertex) { graph[cellId] = cacheGraph[idy][idx]; pred[cellId] = cachePred[idy][idx]; } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Partial dependent phase 2 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_partial_dependent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { if (blockIdx.x == blockId) return; const int idx = threadIdx.x; const int idy = threadIdx.y; 
int v1 = BLOCK_SIZE * blockId + idy; int v2 = BLOCK_SIZE * blockId + idx; __shared__ int cacheGraphBase[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePredBase[BLOCK_SIZE][BLOCK_SIZE]; // Load base block for graph and predecessors int cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { cacheGraphBase[idy][idx] = graph[cellId]; cachePredBase[idy][idx] = pred[cellId]; } else { cacheGraphBase[idy][idx] = MAX_DISTANCE; cachePredBase[idy][idx] = -1; } // Load i-aligned singly dependent blocks if (blockIdx.y == 0) { v2 = BLOCK_SIZE * blockIdx.x + idx; } else { // Load j-aligned singly dependent blocks v1 = BLOCK_SIZE * blockIdx.x + idy; } __shared__ int cacheGraph[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePred[BLOCK_SIZE][BLOCK_SIZE]; // Load current block for graph and predecessors int currentPath; int currentPred; cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { currentPath = graph[cellId]; currentPred = pred[cellId]; } else { currentPath = MAX_DISTANCE; currentPred = -1; } cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure the all value are saved in cache __syncthreads(); int newPath; // Compute i-aligned singly dependent blocks if (blockIdx.y == 0) { #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraphBase[idy][u] + cacheGraph[u][idx]; if (newPath < currentPath) { currentPath = newPath; currentPred = cachePred[u][idx]; } // Synchronize to make sure that all threads compare new value with old __syncthreads(); // Update new values cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure that all threads update cache __syncthreads(); } } else { // Compute j-aligned singly dependent blocks #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraph[idy][u] + cacheGraphBase[u][idx]; if (newPath < currentPath) { currentPath = newPath; currentPred = cachePredBase[u][idx]; } // Synchronize to make sure that all threads compare new value with old __syncthreads(); // Update new values cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure that all threads update cache __syncthreads(); } } if (v1 < nvertex && v2 < nvertex) { graph[cellId] = currentPath; pred[cellId] = currentPred; } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Independent phase 3 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_independent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { if (blockIdx.x == blockId || blockIdx.y == blockId) return; const int idx = threadIdx.x; const int idy = threadIdx.y; const int v1 = blockDim.y * blockIdx.y + idy; const int v2 = blockDim.x * blockIdx.x + idx; __shared__ int cacheGraphBaseRow[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cacheGraphBaseCol[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePredBaseRow[BLOCK_SIZE][BLOCK_SIZE]; int v1Row = BLOCK_SIZE * blockId + idy; int v2Col = BLOCK_SIZE * blockId + idx; // Load data for block int cellId; if (v1Row < nvertex && v2 < nvertex) { cellId = v1Row * pitch + v2; cacheGraphBaseRow[idy][idx] = graph[cellId]; cachePredBaseRow[idy][idx] = pred[cellId]; } else { cacheGraphBaseRow[idy][idx] = MAX_DISTANCE; cachePredBaseRow[idy][idx] = -1; } if (v1 < nvertex 
&& v2Col < nvertex) { cellId = v1 * pitch + v2Col; cacheGraphBaseCol[idy][idx] = graph[cellId]; } else { cacheGraphBaseCol[idy][idx] = MAX_DISTANCE; } // Synchronize to make sure the all value are loaded in virtual block __syncthreads(); int currentPath; int currentPred; int newPath; // Compute data for block if (v1 < nvertex && v2 < nvertex) { cellId = v1 * pitch + v2; currentPath = graph[cellId]; currentPred = pred[cellId]; #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraphBaseCol[idy][u] + cacheGraphBaseRow[u][idx]; if (currentPath > newPath) { currentPath = newPath; currentPred = cachePredBaseRow[u][idx]; } } graph[cellId] = currentPath; pred[cellId] = currentPred; } } /** * Allocate memory on device and copy memory from host to device * @param dataHost: Reference to unique ptr to graph data with allocated fields on host * @param graphDevice: Pointer to array of graph with distance between vertex on device * @param predDevice: Pointer to array of predecessors for a graph on device * * @return: Pitch for allocation */ static size_t _cudaMoveMemoryToDevice(const std::unique_ptr<graphAPSPTopology>& dataHost, int **graphDevice, int **predDevice) { size_t height = dataHost->nvertex; size_t width = height * sizeof(int); size_t pitch; // Allocate GPU buffers for matrix of shortest paths d(G) and predecessors p(G) HANDLE_ERROR(hipMallocPitch(graphDevice, &pitch, width, height)); HANDLE_ERROR(hipMallocPitch(predDevice, &pitch, width, height)); // Copy input from host memory to GPU buffers and HANDLE_ERROR(hipMemcpy2D(*graphDevice, pitch, dataHost->graph.get(), width, width, height, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy2D(*predDevice, pitch, dataHost->pred.get(), width, width, height, hipMemcpyHostToDevice)); return pitch; } /** * Copy memory from device to host and free device memory * * @param graphDevice: Array of graph with distance between vertex on device * @param predDevice: Array of predecessors for a graph on device * @param dataHost: Reference to unique ptr to graph data with allocated fields on host * @param pitch: Pitch for allocation */ static void _cudaMoveMemoryToHost(int *graphDevice, int *predDevice, const std::unique_ptr<graphAPSPTopology>& dataHost, size_t pitch) { size_t height = dataHost->nvertex; size_t width = height * sizeof(int); HANDLE_ERROR(hipMemcpy2D(dataHost->pred.get(), width, predDevice, pitch, width, height, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipMemcpy2D(dataHost->graph.get(), width, graphDevice, pitch, width, height, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipFree(predDevice)); HANDLE_ERROR(hipFree(graphDevice)); } /** * Naive implementation of Floyd Warshall algorithm in CUDA * * @param dataHost: Reference to unique ptr to graph data with allocated fields on host */ void cudaNaiveFW(const std::unique_ptr<graphAPSPTopology>& dataHost) { // Choose which GPU to run on, change this on a multi-GPU system. 
HANDLE_ERROR(hipSetDevice(0)); int nvertex = dataHost->nvertex; // Initialize the grid and block dimensions here dim3 dimGrid((nvertex - 1) / BLOCK_SIZE + 1, (nvertex - 1) / BLOCK_SIZE + 1, 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); int *graphDevice, *predDevice; size_t pitch = _cudaMoveMemoryToDevice(dataHost, &graphDevice, &predDevice); hipFuncSetCacheConfig(_naive_fw_kernel, hipFuncCachePreferL1); for(int vertex = 0; vertex < nvertex; ++vertex) { hipLaunchKernelGGL(( _naive_fw_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, vertex, pitch / sizeof(int), nvertex, graphDevice, predDevice); } // Check for any errors launching the kernel HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipDeviceSynchronize()); _cudaMoveMemoryToHost(graphDevice, predDevice, dataHost, pitch); } /** * Blocked implementation of Floyd Warshall algorithm in CUDA * * @param data: unique ptr to graph data with allocated fields on host */ void cudaBlockedFW(const std::unique_ptr<graphAPSPTopology>& dataHost) { HANDLE_ERROR(hipSetDevice(0)); int nvertex = dataHost->nvertex; int *graphDevice, *predDevice; size_t pitch = _cudaMoveMemoryToDevice(dataHost, &graphDevice, &predDevice); dim3 gridPhase1(1 ,1, 1); dim3 gridPhase2((nvertex - 1) / BLOCK_SIZE + 1, 2 , 1); dim3 gridPhase3((nvertex - 1) / BLOCK_SIZE + 1, (nvertex - 1) / BLOCK_SIZE + 1 , 1); dim3 dimBlockSize(BLOCK_SIZE, BLOCK_SIZE, 1); int numBlock = (nvertex - 1) / BLOCK_SIZE + 1; for(int blockID = 0; blockID < numBlock; ++blockID) { // Start dependent phase hipLaunchKernelGGL(( _blocked_fw_dependent_ph), dim3(gridPhase1), dim3(dimBlockSize), 0, 0, blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); // Start partially dependent phase hipLaunchKernelGGL(( _blocked_fw_partial_dependent_ph), dim3(gridPhase2), dim3(dimBlockSize), 0, 0, blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); // Start independent phase hipLaunchKernelGGL(( _blocked_fw_independent_ph), dim3(gridPhase3), dim3(dimBlockSize), 0, 0, blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); } // Check for any errors launching the kernel HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipDeviceSynchronize()); _cudaMoveMemoryToHost(graphDevice, predDevice, dataHost, pitch); }
49b7af5f5151ab9231b416c4b63eb75896b219c8.cu
#include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_apsp.cuh" /** * CUDA handle error, if error occurs print message and exit program * * @param error: CUDA error status */ #define HANDLE_ERROR(error) { \ if (error != cudaSuccess) { \ fprintf(stderr, "%s in %s at line %d\n", \ cudaGetErrorString(error), __FILE__, __LINE__); \ exit(EXIT_FAILURE); \ } \ } \ /** * Naive CUDA kernel implementation algorithm Floyd Wharshall for APSP * check if path from vertex x -> y will be short using vertex u x -> u -> y * for all vertices in graph * * @param u: Index of vertex u * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _naive_fw_kernel(const int u, size_t pitch, const int nvertex, int* const graph, int* const pred) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (y < nvertex && x < nvertex) { int indexYX = y * pitch + x; int indexUX = u * pitch + x; int newPath = graph[y * pitch + u] + graph[indexUX]; int oldPath = graph[indexYX]; if (oldPath > newPath) { graph[indexYX] = newPath; pred[indexYX] = pred[indexUX]; } } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Dependent phase 1 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_dependent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { __shared__ int cacheGraph[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePred[BLOCK_SIZE][BLOCK_SIZE]; const int idx = threadIdx.x; const int idy = threadIdx.y; const int v1 = BLOCK_SIZE * blockId + idy; const int v2 = BLOCK_SIZE * blockId + idx; int newPred; int newPath; const int cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { cacheGraph[idy][idx] = graph[cellId]; cachePred[idy][idx] = pred[cellId]; newPred = cachePred[idy][idx]; } else { cacheGraph[idy][idx] = MAX_DISTANCE; cachePred[idy][idx] = -1; } // Synchronize to make sure the all value are loaded in block __syncthreads(); #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraph[idy][u] + cacheGraph[u][idx]; // Synchronize before calculate new value __syncthreads(); if (newPath < cacheGraph[idy][idx]) { cacheGraph[idy][idx] = newPath; newPred = cachePred[u][idx]; } // Synchronize to make sure that all value are current __syncthreads(); cachePred[idy][idx] = newPred; } if (v1 < nvertex && v2 < nvertex) { graph[cellId] = cacheGraph[idy][idx]; pred[cellId] = cachePred[idy][idx]; } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Partial dependent phase 2 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_partial_dependent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { if (blockIdx.x == blockId) return; const int idx = threadIdx.x; const int idy = threadIdx.y; int v1 = BLOCK_SIZE * blockId + idy; int v2 = BLOCK_SIZE * blockId + 
idx; __shared__ int cacheGraphBase[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePredBase[BLOCK_SIZE][BLOCK_SIZE]; // Load base block for graph and predecessors int cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { cacheGraphBase[idy][idx] = graph[cellId]; cachePredBase[idy][idx] = pred[cellId]; } else { cacheGraphBase[idy][idx] = MAX_DISTANCE; cachePredBase[idy][idx] = -1; } // Load i-aligned singly dependent blocks if (blockIdx.y == 0) { v2 = BLOCK_SIZE * blockIdx.x + idx; } else { // Load j-aligned singly dependent blocks v1 = BLOCK_SIZE * blockIdx.x + idy; } __shared__ int cacheGraph[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePred[BLOCK_SIZE][BLOCK_SIZE]; // Load current block for graph and predecessors int currentPath; int currentPred; cellId = v1 * pitch + v2; if (v1 < nvertex && v2 < nvertex) { currentPath = graph[cellId]; currentPred = pred[cellId]; } else { currentPath = MAX_DISTANCE; currentPred = -1; } cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure the all value are saved in cache __syncthreads(); int newPath; // Compute i-aligned singly dependent blocks if (blockIdx.y == 0) { #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraphBase[idy][u] + cacheGraph[u][idx]; if (newPath < currentPath) { currentPath = newPath; currentPred = cachePred[u][idx]; } // Synchronize to make sure that all threads compare new value with old __syncthreads(); // Update new values cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure that all threads update cache __syncthreads(); } } else { // Compute j-aligned singly dependent blocks #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraph[idy][u] + cacheGraphBase[u][idx]; if (newPath < currentPath) { currentPath = newPath; currentPred = cachePredBase[u][idx]; } // Synchronize to make sure that all threads compare new value with old __syncthreads(); // Update new values cacheGraph[idy][idx] = currentPath; cachePred[idy][idx] = currentPred; // Synchronize to make sure that all threads update cache __syncthreads(); } } if (v1 < nvertex && v2 < nvertex) { graph[cellId] = currentPath; pred[cellId] = currentPred; } } /** * Blocked CUDA kernel implementation algorithm Floyd Wharshall for APSP * Independent phase 3 * * @param blockId: Index of block * @param nvertex: Number of all vertex in graph * @param pitch: Length of row in memory * @param graph: Array of graph with distance between vertex on device * @param pred: Array of predecessors for a graph on device */ static __global__ void _blocked_fw_independent_ph(const int blockId, size_t pitch, const int nvertex, int* const graph, int* const pred) { if (blockIdx.x == blockId || blockIdx.y == blockId) return; const int idx = threadIdx.x; const int idy = threadIdx.y; const int v1 = blockDim.y * blockIdx.y + idy; const int v2 = blockDim.x * blockIdx.x + idx; __shared__ int cacheGraphBaseRow[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cacheGraphBaseCol[BLOCK_SIZE][BLOCK_SIZE]; __shared__ int cachePredBaseRow[BLOCK_SIZE][BLOCK_SIZE]; int v1Row = BLOCK_SIZE * blockId + idy; int v2Col = BLOCK_SIZE * blockId + idx; // Load data for block int cellId; if (v1Row < nvertex && v2 < nvertex) { cellId = v1Row * pitch + v2; cacheGraphBaseRow[idy][idx] = graph[cellId]; cachePredBaseRow[idy][idx] = pred[cellId]; } else { cacheGraphBaseRow[idy][idx] = MAX_DISTANCE; cachePredBaseRow[idy][idx] = -1; } if (v1 < nvertex && v2Col < nvertex) { cellId = v1 * pitch + v2Col; 
cacheGraphBaseCol[idy][idx] = graph[cellId]; } else { cacheGraphBaseCol[idy][idx] = MAX_DISTANCE; } // Synchronize to make sure the all value are loaded in virtual block __syncthreads(); int currentPath; int currentPred; int newPath; // Compute data for block if (v1 < nvertex && v2 < nvertex) { cellId = v1 * pitch + v2; currentPath = graph[cellId]; currentPred = pred[cellId]; #pragma unroll for (int u = 0; u < BLOCK_SIZE; ++u) { newPath = cacheGraphBaseCol[idy][u] + cacheGraphBaseRow[u][idx]; if (currentPath > newPath) { currentPath = newPath; currentPred = cachePredBaseRow[u][idx]; } } graph[cellId] = currentPath; pred[cellId] = currentPred; } } /** * Allocate memory on device and copy memory from host to device * @param dataHost: Reference to unique ptr to graph data with allocated fields on host * @param graphDevice: Pointer to array of graph with distance between vertex on device * @param predDevice: Pointer to array of predecessors for a graph on device * * @return: Pitch for allocation */ static size_t _cudaMoveMemoryToDevice(const std::unique_ptr<graphAPSPTopology>& dataHost, int **graphDevice, int **predDevice) { size_t height = dataHost->nvertex; size_t width = height * sizeof(int); size_t pitch; // Allocate GPU buffers for matrix of shortest paths d(G) and predecessors p(G) HANDLE_ERROR(cudaMallocPitch(graphDevice, &pitch, width, height)); HANDLE_ERROR(cudaMallocPitch(predDevice, &pitch, width, height)); // Copy input from host memory to GPU buffers and HANDLE_ERROR(cudaMemcpy2D(*graphDevice, pitch, dataHost->graph.get(), width, width, height, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy2D(*predDevice, pitch, dataHost->pred.get(), width, width, height, cudaMemcpyHostToDevice)); return pitch; } /** * Copy memory from device to host and free device memory * * @param graphDevice: Array of graph with distance between vertex on device * @param predDevice: Array of predecessors for a graph on device * @param dataHost: Reference to unique ptr to graph data with allocated fields on host * @param pitch: Pitch for allocation */ static void _cudaMoveMemoryToHost(int *graphDevice, int *predDevice, const std::unique_ptr<graphAPSPTopology>& dataHost, size_t pitch) { size_t height = dataHost->nvertex; size_t width = height * sizeof(int); HANDLE_ERROR(cudaMemcpy2D(dataHost->pred.get(), width, predDevice, pitch, width, height, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy2D(dataHost->graph.get(), width, graphDevice, pitch, width, height, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaFree(predDevice)); HANDLE_ERROR(cudaFree(graphDevice)); } /** * Naive implementation of Floyd Warshall algorithm in CUDA * * @param dataHost: Reference to unique ptr to graph data with allocated fields on host */ void cudaNaiveFW(const std::unique_ptr<graphAPSPTopology>& dataHost) { // Choose which GPU to run on, change this on a multi-GPU system. 
HANDLE_ERROR(cudaSetDevice(0)); int nvertex = dataHost->nvertex; // Initialize the grid and block dimensions here dim3 dimGrid((nvertex - 1) / BLOCK_SIZE + 1, (nvertex - 1) / BLOCK_SIZE + 1, 1); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1); int *graphDevice, *predDevice; size_t pitch = _cudaMoveMemoryToDevice(dataHost, &graphDevice, &predDevice); cudaFuncSetCacheConfig(_naive_fw_kernel, cudaFuncCachePreferL1); for(int vertex = 0; vertex < nvertex; ++vertex) { _naive_fw_kernel<<<dimGrid, dimBlock>>>(vertex, pitch / sizeof(int), nvertex, graphDevice, predDevice); } // Check for any errors launching the kernel HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaDeviceSynchronize()); _cudaMoveMemoryToHost(graphDevice, predDevice, dataHost, pitch); } /** * Blocked implementation of Floyd Warshall algorithm in CUDA * * @param data: unique ptr to graph data with allocated fields on host */ void cudaBlockedFW(const std::unique_ptr<graphAPSPTopology>& dataHost) { HANDLE_ERROR(cudaSetDevice(0)); int nvertex = dataHost->nvertex; int *graphDevice, *predDevice; size_t pitch = _cudaMoveMemoryToDevice(dataHost, &graphDevice, &predDevice); dim3 gridPhase1(1 ,1, 1); dim3 gridPhase2((nvertex - 1) / BLOCK_SIZE + 1, 2 , 1); dim3 gridPhase3((nvertex - 1) / BLOCK_SIZE + 1, (nvertex - 1) / BLOCK_SIZE + 1 , 1); dim3 dimBlockSize(BLOCK_SIZE, BLOCK_SIZE, 1); int numBlock = (nvertex - 1) / BLOCK_SIZE + 1; for(int blockID = 0; blockID < numBlock; ++blockID) { // Start dependent phase _blocked_fw_dependent_ph<<<gridPhase1, dimBlockSize>>> (blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); // Start partially dependent phase _blocked_fw_partial_dependent_ph<<<gridPhase2, dimBlockSize>>> (blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); // Start independent phase _blocked_fw_independent_ph<<<gridPhase3, dimBlockSize>>> (blockID, pitch / sizeof(int), nvertex, graphDevice, predDevice); } // Check for any errors launching the kernel HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaDeviceSynchronize()); _cudaMoveMemoryToHost(graphDevice, predDevice, dataHost, pitch); }
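Both the naive and blocked paths implement the same relaxation, d(v1,v2) = min(d(v1,v2), d(v1,u) + d(u,v2)), with pred(v1,v2) updated from pred(u,v2) whenever the path through u is shorter; the blocked version merely tiles it into the dependent, partially dependent and independent phases above. A serial reference over the same row-major graph/pred matrices is a convenient way to validate either GPU path. This is a sketch, assuming MAX_DISTANCE is the "no edge" sentinel defined in cuda_apsp.cuh and is small enough that the sum below cannot overflow (the kernels make the same assumption).

// Serial Floyd-Warshall reference with predecessor tracking (validation sketch).
// graph and pred are row-major nvertex x nvertex arrays, updated in place.
void cpu_floyd_warshall(int nvertex, int* graph, int* pred)
{
    for (int u = 0; u < nvertex; ++u) {
        for (int v1 = 0; v1 < nvertex; ++v1) {
            for (int v2 = 0; v2 < nvertex; ++v2) {
                int newPath = graph[v1 * nvertex + u] + graph[u * nvertex + v2];
                if (newPath < graph[v1 * nvertex + v2]) {
                    graph[v1 * nvertex + v2] = newPath;
                    pred[v1 * nvertex + v2] = pred[u * nvertex + v2];
                }
            }
        }
    }
}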
ed166aac475c56f07f8ec449c07c5f2bccb227e7.hip
// !!! This is a file automatically generated by hipify!!! /* * * * Created on: 27.6.2011 * Author: Teemu Rantalaiho ([email protected]) * * * Copyright 2011 Teemu Rantalaiho * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * Compile with: * * nvcc -O4 -arch=<cuda_arch> -I../ test_mem_multi_4int.cu -o test_mem_multi_4int * * thrust codepath (-DTHRUST) seems not to be up to date in this test - may give wrong results here * */ #define TESTMAXIDX 512 // 16 keys / indices #define TEST_IS_POW2 1 #define TEST_SIZE (25 * 1000 * 1000 ) // 25 million inputs (100 million keys) #define NRUNS 100 // Repeat 100 times => 10 Gigainputs in total (4 keys per entry) #define START_INDEX 0 #define NSTRESS_RUNS NRUNS #ifdef THRUST #define ENABLE_THRUST 1 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key) #endif #include "cuda_histogram.h" /*#include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h>*/ #if ENABLE_THRUST #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> #endif #include <assert.h> #include <stdio.h> // Always return 1 -> normal histogram - each sample has same weight struct test_xform2 { __host__ __device__ void operator() (uint4* input, int i, int* result_index, int* results, int nresults) const { uint4 idata = input[i]; #pragma unroll for (int resIdx = 0; resIdx < 4; resIdx++) { unsigned int data = ((unsigned int*)(&idata))[resIdx]; #if TEST_IS_POW2 *result_index++ = data & (TESTMAXIDX - 1); #else *result_index++ = data % (TESTMAXIDX); #endif *results++ = 1; } } }; struct test_sumfun2 { __device__ __host__ int operator() (int res1, int res2) const{ return res1 + res2; } }; static void printres (int* res, int nres, const char* descr) { if (descr) printf("\n%s:\n", descr); printf("vals = [ "); for (int i = 0; i < nres; i++) printf("(%d), ", res[i]); printf("]\n"); } static void testHistogramParam(uint4* INPUT, uint4* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress, void* tmpbuf) { int nIndex = TESTMAXIDX; int srun; int nruns = stress ? NSTRESS_RUNS : 1; test_sumfun2 sumFun; test_xform2 transformFun; //test_indexfun2 indexFun; int* tmpres = (int*)malloc(sizeof(int) * nIndex); int* cpures = stress ? 
(int*)malloc(sizeof(int) * nIndex) : tmpres; int zero = 0; for (srun = 0; srun < nruns; srun++) { { int* tmpidx = (int*)malloc(sizeof(int) * nIndex); if (print) printf("\nTest reduce_by_key:\n\n"); memset(tmpres, 0, sizeof(int) * nIndex); if (stress) memset(cpures, 0, sizeof(int) * nIndex); if (cpurun || stress) for (int i = index_0; i < index_1; i++) { int index[4]; int tmp[4]; transformFun(hostINPUT, i, &index[0], &tmp[0], 1); //index = indexFun(INPUT, i); for (int tmpi = 0; tmpi < 4; tmpi++) cpures[index[tmpi]] = sumFun(cpures[index[tmpi]], tmp[tmpi]); //printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag); } if (print && cpurun) { printres(cpures, nIndex, "CPU results:"); } } if (!cpurun) callHistogramKernel<histogram_atomic_inc, 4>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex, false, 0, tmpbuf); if (stress) { int k; for (k = 0; k < nIndex; k++) { if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/) { printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1); printres(cpures, nIndex, "CPU results:"); printres(tmpres, nIndex, "GPU results:"); } } } if (print && (!cpurun)) { printres(tmpres, nIndex, "GPU results:"); } int size = index_1 - index_0; index_0 += 1; index_1 -= 1; if (index_0 > index_1 + 1) { int tmp = index_0; index_0 = index_1; index_1 = tmp; } if (index_0 < 0 || index_1 < 0) { index_0 = 0; index_1 = size - 1; } } free(tmpres); if (stress) free(cpures); } #if ENABLE_THRUST // NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1 // And also that we know before hand the number of indices coming out static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print) { test_sumfun2 mysumfun; thrust::equal_to<int> binary_pred; int nIndex = TESTMAXIDX; int N = index_1 - index_0; thrust::device_vector<int> keys_out(nIndex); thrust::device_vector<int> vals_out(nIndex); thrust::device_vector<int> h_vals_out(nIndex); //thrust::device_vector<int> keys(N); thrust::device_ptr<int> keys(INPUT); // Sort the data thrust::sort(keys, keys + N); // And reduce by key - histogram complete thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun); h_vals_out = vals_out; if (print) { printf("\nThrust results:\n"); printf("vals = [ "); for (int i = 0; i < nIndex; i++) { int tmp = h_vals_out[i]; printf("(%d), ", tmp); } printf("]\n"); } } #endif void printUsage(void) { printf("\n"); printf("Test order independent reduce-by-key / histogram algorithm\n\n"); printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n"); printf("\tOptions:\n\n"); printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n"); printf("\t\t--print\t\t Print results of algorithm (check validity)\n"); printf("\t\t--thrust\t Run on GPU but using thrust library\n"); printf("\t\t--load\t Use 32-bit texture data s\n"); printf("\t\t--rnd\t Take uniform random keys s\n"); // printf("\t\t--sharp\t Make peaks sharp\n"); // printf("\t\t--nornd\t Remove random noise from input\n"); } static unsigned int* MyTexture_load(char* filename, int* dataSize) { FILE* file = fopen(filename, "rb"); //texture->dataRGBA8888 = NULL; if (!file) { char* tmp = (char*)malloc(strlen(filename) + 10); if (tmp) { char* ptr = tmp; strcpy(ptr, "../"); ptr += 3; strcpy(ptr, filename); file = fopen(tmp, "rb"); } } // Read if (file) { int npixels = 512 * 512;//texture->width * 
texture->height; int size = npixels * 4; unsigned int* data = (unsigned int*)malloc(size); *dataSize = npixels; if (data) { int i; for (i = 0; i < npixels; i++) { unsigned int r, g, b; unsigned int raw = 0; unsigned int pixel = 0; int rsize = fread(&raw, 3, 1, file); if (rsize != 1) { printf( "Warning: Unexpected EOF in texture %s at idx %d\n", filename, i); break; } r = (raw & 0x00FF0000) >> 16; g = (raw & 0x0000FF00) >> 8; b = (raw & 0x000000FF) >> 0; pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0); data[i] = pixel; } } fclose(file); return data; } return NULL; } static inline int getInput(size_t i, unsigned int* texData, int dataSize, bool rnd) { if (texData) { static size_t index = i % dataSize; static int round = 0; unsigned int val = texData[index]; int result; result = val + round; index++; if (index >= dataSize) { index = 0; round += 7; } #if TEST_IS_POW2 result = (int)(result); #else result = (int)(result); #endif return result; } else { static unsigned int current = 0xf1232345; const unsigned int mult = 1664525; const unsigned int add = 1013904223ul; //int peakWidth = sharp ? 100 : 10000; // static int nextPeak = 200; // static int currentBase = 0; // static int currentWidth = TESTMAXIDX; current = current * mult + add; if (!rnd) current = i / 100; #if TEST_IS_POW2 i = (int)(current); #else i = (int)(current); #endif return i; } } static void fillInput(int* input, bool load, bool rnd) { size_t i; unsigned int* texData = NULL; int dataSize = 0; if (load && !rnd) { texData = MyTexture_load("texture.raw", &dataSize); } for (i = 0; i < TEST_SIZE * 4;) { *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; } if (texData) free(texData); } int main (int argc, char** argv) { int i; int index_0 = START_INDEX; int index_1 = index_0 + TEST_SIZE; bool cpu = false; bool print = false; bool thrust = false; bool stress = false; // bool peaks = false; // bool sharp = false; bool rnd = false; bool load = false; void* tmpbuf = NULL; printUsage(); for (i = 0; i < argc; i++) { if (argv[i] && strcmp(argv[i], "--cpu") == 0) cpu = true; if (argv[i] && strcmp(argv[i], "--print") == 0) print = true; if (argv[i] && strcmp(argv[i], "--thrust") == 0) thrust = true; if (argv[i] && strcmp(argv[i], "--stress") == 0) stress = true; // if (argv[i] && strcmp(argv[i], "--peaks") == 0) // peaks = true; if (argv[i] && strcmp(argv[i], "--load") == 0) load = true; // if (argv[i] && strcmp(argv[i], "--sharp") == 0) // sharp = true; if (argv[i] && strcmp(argv[i], "--rnd") == 0) rnd = true; } { // Allocate keys: int* INPUT = NULL; int* hostINPUT = (int*)malloc(4 * sizeof(int) * (TEST_SIZE + 3)); assert(hostINPUT); fillInput(hostINPUT, load, rnd); if (!cpu) { hipMalloc(&INPUT, 4 * sizeof(int) * TEST_SIZE); assert(INPUT); hipMemcpy(INPUT, hostINPUT, 4 * sizeof(int) * TEST_SIZE, hipMemcpyHostToDevice); int tmpbufSize = getHistogramBufSize<histogram_atomic_inc>(0, TESTMAXIDX); hipMalloc(&tmpbuf, tmpbufSize); } // Create events for timing: hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Now start timer - we run on stream 0 (default stream): hipEventRecord(start, 0); for (i = 0; i < NRUNS; i++) { if (thrust) { #if ENABLE_THRUST testHistogramParamThrust(INPUT, index_0, 4*index_1, print); #else printf("\nTest was compiled without thrust support! 
Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n"); break; #endif } else { testHistogramParam((uint4*)INPUT, (uint4*)hostINPUT, index_0, index_1, print, cpu, stress, tmpbuf); } print = false; // Run only once all stress-tests if (stress) break; } { float t_ms; hipEventRecord(stop, 0); hipDeviceSynchronize(); hipEventElapsedTime(&t_ms, start, stop); double t = t_ms * 0.001f; double GKps = (((double)TEST_SIZE * (double)NRUNS * 4.0)) / (t*1.e9); printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps); } if (tmpbuf) hipFree(tmpbuf); if (INPUT) hipFree(INPUT); if (hostINPUT) free(hostINPUT); hipEventDestroy(start); hipEventDestroy(stop); } return 0; }
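As a side note on the timing code in main() above, this is a minimal sketch of the HIP event-timing pattern it uses. do_work() is a placeholder rather than a function from this file, and the stop event is synchronized directly instead of calling hipDeviceSynchronize().

#include <hip/hip_runtime.h>
#include <cstdio>

void do_work();  // placeholder for the histogram calls enqueued on stream 0

void time_default_stream() {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    do_work();
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);           // wait for the recorded work to finish
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    std::printf("elapsed: %f ms\n", ms);
    hipEventDestroy(start);
    hipEventDestroy(stop);
}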
ed166aac475c56f07f8ec449c07c5f2bccb227e7.cu
/* * * * Created on: 27.6.2011 * Author: Teemu Rantalaiho ([email protected]) * * * Copyright 2011 Teemu Rantalaiho * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * Compile with: * * nvcc -O4 -arch=<cuda_arch> -I../ test_mem_multi_4int.cu -o test_mem_multi_4int * * thrust codepath (-DTHRUST) seems not to be up to date in this test - may give wrong results here * */ #define TESTMAXIDX 512 // 16 keys / indices #define TEST_IS_POW2 1 #define TEST_SIZE (25 * 1000 * 1000 ) // 25 million inputs (100 million keys) #define NRUNS 100 // Repeat 100 times => 10 Gigainputs in total (4 keys per entry) #define START_INDEX 0 #define NSTRESS_RUNS NRUNS #ifdef THRUST #define ENABLE_THRUST 1 // Enable thrust-based version also (xform-sort_by_key-reduce_by_key) #endif #include "cuda_histogram.h" /*#include <cuda_runtime_api.h> #include <cuda_runtime.h> #include <cuda.h>*/ #if ENABLE_THRUST #include <thrust/transform_reduce.h> #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_iterator.h> #include <thrust/sort.h> #endif #include <assert.h> #include <stdio.h> // Always return 1 -> normal histogram - each sample has same weight struct test_xform2 { __host__ __device__ void operator() (uint4* input, int i, int* result_index, int* results, int nresults) const { uint4 idata = input[i]; #pragma unroll for (int resIdx = 0; resIdx < 4; resIdx++) { unsigned int data = ((unsigned int*)(&idata))[resIdx]; #if TEST_IS_POW2 *result_index++ = data & (TESTMAXIDX - 1); #else *result_index++ = data % (TESTMAXIDX); #endif *results++ = 1; } } }; struct test_sumfun2 { __device__ __host__ int operator() (int res1, int res2) const{ return res1 + res2; } }; static void printres (int* res, int nres, const char* descr) { if (descr) printf("\n%s:\n", descr); printf("vals = [ "); for (int i = 0; i < nres; i++) printf("(%d), ", res[i]); printf("]\n"); } static void testHistogramParam(uint4* INPUT, uint4* hostINPUT, int index_0, int index_1, bool print, bool cpurun, bool stress, void* tmpbuf) { int nIndex = TESTMAXIDX; int srun; int nruns = stress ? NSTRESS_RUNS : 1; test_sumfun2 sumFun; test_xform2 transformFun; //test_indexfun2 indexFun; int* tmpres = (int*)malloc(sizeof(int) * nIndex); int* cpures = stress ? 
(int*)malloc(sizeof(int) * nIndex) : tmpres; int zero = 0; for (srun = 0; srun < nruns; srun++) { { int* tmpidx = (int*)malloc(sizeof(int) * nIndex); if (print) printf("\nTest reduce_by_key:\n\n"); memset(tmpres, 0, sizeof(int) * nIndex); if (stress) memset(cpures, 0, sizeof(int) * nIndex); if (cpurun || stress) for (int i = index_0; i < index_1; i++) { int index[4]; int tmp[4]; transformFun(hostINPUT, i, &index[0], &tmp[0], 1); //index = indexFun(INPUT, i); for (int tmpi = 0; tmpi < 4; tmpi++) cpures[index[tmpi]] = sumFun(cpures[index[tmpi]], tmp[tmpi]); //printf("i = %d, out_index = %d, out_val = (%.3f, %.3f) \n",i, index, tmp.real, tmp.imag); } if (print && cpurun) { printres(cpures, nIndex, "CPU results:"); } } if (!cpurun) callHistogramKernel<histogram_atomic_inc, 4>(INPUT, transformFun, /*indexFun,*/ sumFun, index_0, index_1, zero, tmpres, nIndex, false, 0, tmpbuf); if (stress) { int k; for (k = 0; k < nIndex; k++) { if (tmpres[k] != cpures[k] /*|| tmpres[k].imag != cpures[k].imag*/) { printf("Error detected with index-values: i0 = %d, i1 = %d!\n", index_0, index_1); printres(cpures, nIndex, "CPU results:"); printres(tmpres, nIndex, "GPU results:"); } } } if (print && (!cpurun)) { printres(tmpres, nIndex, "GPU results:"); } int size = index_1 - index_0; index_0 += 1; index_1 -= 1; if (index_0 > index_1 + 1) { int tmp = index_0; index_0 = index_1; index_1 = tmp; } if (index_0 < 0 || index_1 < 0) { index_0 = 0; index_1 = size - 1; } } free(tmpres); if (stress) free(cpures); } #if ENABLE_THRUST // NOTE: Take advantage here of the fact that this is the classical histogram with all values = 1 // And also that we know before hand the number of indices coming out static void testHistogramParamThrust(int* INPUT, int index_0, int index_1, bool print) { test_sumfun2 mysumfun; thrust::equal_to<int> binary_pred; int nIndex = TESTMAXIDX; int N = index_1 - index_0; thrust::device_vector<int> keys_out(nIndex); thrust::device_vector<int> vals_out(nIndex); thrust::device_vector<int> h_vals_out(nIndex); //thrust::device_vector<int> keys(N); thrust::device_ptr<int> keys(INPUT); // Sort the data thrust::sort(keys, keys + N); // And reduce by key - histogram complete thrust::reduce_by_key(keys, keys + N, thrust::make_constant_iterator(1), keys_out.begin(), vals_out.begin(), binary_pred, mysumfun); h_vals_out = vals_out; if (print) { printf("\nThrust results:\n"); printf("vals = [ "); for (int i = 0; i < nIndex; i++) { int tmp = h_vals_out[i]; printf("(%d), ", tmp); } printf("]\n"); } } #endif void printUsage(void) { printf("\n"); printf("Test order independent reduce-by-key / histogram algorithm\n\n"); printf("By default this runs on custom algorithm on the GPU, with lots of equal consecutive keys\n\n"); printf("\tOptions:\n\n"); printf("\t\t--cpu\t\t Run on CPU serially instead of GPU\n"); printf("\t\t--print\t\t Print results of algorithm (check validity)\n"); printf("\t\t--thrust\t Run on GPU but using thrust library\n"); printf("\t\t--load\t Use 32-bit texture data s\n"); printf("\t\t--rnd\t Take uniform random keys s\n"); // printf("\t\t--sharp\t Make peaks sharp\n"); // printf("\t\t--nornd\t Remove random noise from input\n"); } static unsigned int* MyTexture_load(char* filename, int* dataSize) { FILE* file = fopen(filename, "rb"); //texture->dataRGBA8888 = NULL; if (!file) { char* tmp = (char*)malloc(strlen(filename) + 10); if (tmp) { char* ptr = tmp; strcpy(ptr, "../"); ptr += 3; strcpy(ptr, filename); file = fopen(tmp, "rb"); } } // Read if (file) { int npixels = 512 * 512;//texture->width * 
texture->height; int size = npixels * 4; unsigned int* data = (unsigned int*)malloc(size); *dataSize = npixels; if (data) { int i; for (i = 0; i < npixels; i++) { unsigned int r, g, b; unsigned int raw = 0; unsigned int pixel = 0; int rsize = fread(&raw, 3, 1, file); if (rsize != 1) { printf( "Warning: Unexpected EOF in texture %s at idx %d\n", filename, i); break; } r = (raw & 0x00FF0000) >> 16; g = (raw & 0x0000FF00) >> 8; b = (raw & 0x000000FF) >> 0; pixel = 0xFF000000 | (b << 16) | (g << 8) | (r << 0); data[i] = pixel; } } fclose(file); return data; } return NULL; } static inline int getInput(size_t i, unsigned int* texData, int dataSize, bool rnd) { if (texData) { static size_t index = i % dataSize; static int round = 0; unsigned int val = texData[index]; int result; result = val + round; index++; if (index >= dataSize) { index = 0; round += 7; } #if TEST_IS_POW2 result = (int)(result); #else result = (int)(result); #endif return result; } else { static unsigned int current = 0xf1232345; const unsigned int mult = 1664525; const unsigned int add = 1013904223ul; //int peakWidth = sharp ? 100 : 10000; // static int nextPeak = 200; // static int currentBase = 0; // static int currentWidth = TESTMAXIDX; current = current * mult + add; if (!rnd) current = i / 100; #if TEST_IS_POW2 i = (int)(current); #else i = (int)(current); #endif return i; } } static void fillInput(int* input, bool load, bool rnd) { size_t i; unsigned int* texData = NULL; int dataSize = 0; if (load && !rnd) { texData = MyTexture_load("texture.raw", &dataSize); } for (i = 0; i < TEST_SIZE * 4;) { *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; *input++ = getInput(i++, texData, dataSize, rnd) % TESTMAXIDX; } if (texData) free(texData); } int main (int argc, char** argv) { int i; int index_0 = START_INDEX; int index_1 = index_0 + TEST_SIZE; bool cpu = false; bool print = false; bool thrust = false; bool stress = false; // bool peaks = false; // bool sharp = false; bool rnd = false; bool load = false; void* tmpbuf = NULL; printUsage(); for (i = 0; i < argc; i++) { if (argv[i] && strcmp(argv[i], "--cpu") == 0) cpu = true; if (argv[i] && strcmp(argv[i], "--print") == 0) print = true; if (argv[i] && strcmp(argv[i], "--thrust") == 0) thrust = true; if (argv[i] && strcmp(argv[i], "--stress") == 0) stress = true; // if (argv[i] && strcmp(argv[i], "--peaks") == 0) // peaks = true; if (argv[i] && strcmp(argv[i], "--load") == 0) load = true; // if (argv[i] && strcmp(argv[i], "--sharp") == 0) // sharp = true; if (argv[i] && strcmp(argv[i], "--rnd") == 0) rnd = true; } { // Allocate keys: int* INPUT = NULL; int* hostINPUT = (int*)malloc(4 * sizeof(int) * (TEST_SIZE + 3)); assert(hostINPUT); fillInput(hostINPUT, load, rnd); if (!cpu) { cudaMalloc(&INPUT, 4 * sizeof(int) * TEST_SIZE); assert(INPUT); cudaMemcpy(INPUT, hostINPUT, 4 * sizeof(int) * TEST_SIZE, cudaMemcpyHostToDevice); int tmpbufSize = getHistogramBufSize<histogram_atomic_inc>(0, TESTMAXIDX); cudaMalloc(&tmpbuf, tmpbufSize); } // Create events for timing: cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Now start timer - we run on stream 0 (default stream): cudaEventRecord(start, 0); for (i = 0; i < NRUNS; i++) { if (thrust) { #if ENABLE_THRUST testHistogramParamThrust(INPUT, index_0, 4*index_1, print); #else printf("\nTest was compiled without thrust support! 
Find 'ENABLE_THRUST' in source-code!\n\n Exiting...\n"); break; #endif } else { testHistogramParam((uint4*)INPUT, (uint4*)hostINPUT, index_0, index_1, print, cpu, stress, tmpbuf); } print = false; // Run only once all stress-tests if (stress) break; } { float t_ms; cudaEventRecord(stop, 0); cudaThreadSynchronize(); cudaEventElapsedTime(&t_ms, start, stop); double t = t_ms * 0.001f; double GKps = (((double)TEST_SIZE * (double)NRUNS * 4.0)) / (t*1.e9); printf("Runtime in loops: %fs, Throughput (Gkeys/s): %3f GK/s \n", t, GKps); } if (tmpbuf) cudaFree(tmpbuf); if (INPUT) cudaFree(INPUT); if (hostINPUT) free(hostINPUT); cudaEventDestroy(start); cudaEventDestroy(stop); } return 0; }
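For reference, here is a self-contained sketch of the thrust code path that testHistogramParamThrust() above relies on: sort the keys, then reduce_by_key over a constant_iterator of ones. The function name is illustrative, and the caller is assumed to size bins_out and counts_out to at least the number of distinct keys.

#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>
#include <thrust/iterator/constant_iterator.h>

void thrust_histogram(thrust::device_vector<int>& keys,
                      thrust::device_vector<int>& bins_out,
                      thrust::device_vector<int>& counts_out) {
    thrust::sort(keys.begin(), keys.end());
    // ends.first / ends.second mark the end of the filled prefix of the outputs.
    auto ends = thrust::reduce_by_key(keys.begin(), keys.end(),
                                      thrust::make_constant_iterator(1),
                                      bins_out.begin(), counts_out.begin());
    (void)ends;
}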
bb86ad767fdf84de145c0925071761de3fc1f858.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
bb86ad767fdf84de145c0925071761de3fc1f858.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using LayoutDst = cutlass::layout::TensorNCxHWx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 16, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
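The two generated files above exist only to provide one explicit template instantiation per translation unit, so each cutlass configuration is compiled separately. A toy, self-contained illustration of that mechanism follows; it is not MegDNN code and the names are invented.

#include <cstdio>

// Stands in for the wrapper template that normally lives in the shared .cuinl.
template <int TileM, int TileN>
void launch_config() {
    std::printf("tile %dx%d\n", TileM, TileN);
}

// One explicit instantiation per generated file; a sibling file would
// instantiate a different tile shape.
template void launch_config<64, 128>();
template void launch_config<128, 128>();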
0a4f55f54c7204641b30e804c818caf48008ce17.hip
// !!! This is a file automatically generated by hipify!!!
/*!
 * Copyright (c) 2020 by Contributors
 * \file array/cuda/array_sort.cu
 * \brief Array sort GPU implementation
 */
#include <dgl/array.h>
#include <hipcub/hipcub.hpp>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {

template <DLDeviceType XPU, typename IdType>
std::pair<IdArray, IdArray> Sort(IdArray array, int num_bits) {
  const auto& ctx = array->ctx;
  auto device = runtime::DeviceAPI::Get(ctx);
  const int64_t nitems = array->shape[0];
  IdArray orig_idx = Range(0, nitems, 64, ctx);
  IdArray sorted_array = NewIdArray(nitems, ctx, array->dtype.bits);
  IdArray sorted_idx = NewIdArray(nitems, ctx, 64);

  const IdType* keys_in = array.Ptr<IdType>();
  const int64_t* values_in = orig_idx.Ptr<int64_t>();
  IdType* keys_out = sorted_array.Ptr<IdType>();
  int64_t* values_out = sorted_idx.Ptr<int64_t>();

  if (num_bits == 0) {
    num_bits = sizeof(IdType) * 8;
  }

  // Allocate workspace
  size_t workspace_size = 0;
  hipcub::DeviceRadixSort::SortPairs(nullptr, workspace_size, keys_in, keys_out,
                                     values_in, values_out, nitems, 0, num_bits);
  void* workspace = device->AllocWorkspace(ctx, workspace_size);

  // Compute
  hipcub::DeviceRadixSort::SortPairs(workspace, workspace_size, keys_in, keys_out,
                                     values_in, values_out, nitems, 0, num_bits);

  device->FreeWorkspace(ctx, workspace);

  return std::make_pair(sorted_array, sorted_idx);
}

template std::pair<IdArray, IdArray> Sort<kDLGPU, int32_t>(IdArray, int num_bits);
template std::pair<IdArray, IdArray> Sort<kDLGPU, int64_t>(IdArray, int num_bits);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
0a4f55f54c7204641b30e804c818caf48008ce17.cu
/*!
 * Copyright (c) 2020 by Contributors
 * \file array/cuda/array_sort.cu
 * \brief Array sort GPU implementation
 */
#include <dgl/array.h>
#include <cub/cub.cuh>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"

namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {

template <DLDeviceType XPU, typename IdType>
std::pair<IdArray, IdArray> Sort(IdArray array, int num_bits) {
  const auto& ctx = array->ctx;
  auto device = runtime::DeviceAPI::Get(ctx);
  const int64_t nitems = array->shape[0];
  IdArray orig_idx = Range(0, nitems, 64, ctx);
  IdArray sorted_array = NewIdArray(nitems, ctx, array->dtype.bits);
  IdArray sorted_idx = NewIdArray(nitems, ctx, 64);

  const IdType* keys_in = array.Ptr<IdType>();
  const int64_t* values_in = orig_idx.Ptr<int64_t>();
  IdType* keys_out = sorted_array.Ptr<IdType>();
  int64_t* values_out = sorted_idx.Ptr<int64_t>();

  if (num_bits == 0) {
    num_bits = sizeof(IdType) * 8;
  }

  // Allocate workspace
  size_t workspace_size = 0;
  cub::DeviceRadixSort::SortPairs(nullptr, workspace_size, keys_in, keys_out,
                                  values_in, values_out, nitems, 0, num_bits);
  void* workspace = device->AllocWorkspace(ctx, workspace_size);

  // Compute
  cub::DeviceRadixSort::SortPairs(workspace, workspace_size, keys_in, keys_out,
                                  values_in, values_out, nitems, 0, num_bits);

  device->FreeWorkspace(ctx, workspace);

  return std::make_pair(sorted_array, sorted_idx);
}

template std::pair<IdArray, IdArray> Sort<kDLGPU, int32_t>(IdArray, int num_bits);
template std::pair<IdArray, IdArray> Sort<kDLGPU, int64_t>(IdArray, int num_bits);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
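Sort() above calls cub::DeviceRadixSort::SortPairs twice: a first call with a null temp pointer that only reports the required workspace size, then the real sort. Below is a standalone sketch of that two-phase pattern, using plain cudaMalloc instead of DGL's workspace allocator; the function name is illustrative.

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdint>

void sort_pairs(const int32_t* keys_in, int32_t* keys_out,
                const int64_t* vals_in, int64_t* vals_out, int n) {
    size_t temp_bytes = 0;
    // Query pass: no sorting happens, temp_bytes is filled in.
    cub::DeviceRadixSort::SortPairs(nullptr, temp_bytes,
                                    keys_in, keys_out, vals_in, vals_out, n);
    void* temp = nullptr;
    cudaMalloc(&temp, temp_bytes);
    // Sort pass with the allocated workspace.
    cub::DeviceRadixSort::SortPairs(temp, temp_bytes,
                                    keys_in, keys_out, vals_in, vals_out, n);
    cudaFree(temp);
}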
88220fdc1a5240eace969517756256291bb828a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CUJ2K - JPEG2000 Encoder on CUDA http://cuj2k.sourceforge.net/ Copyright (c) 2009 Norbert Fuerst, Martin Heide, Armin Weiss, Simon Papandreou, Ana Balevic Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Picture preprocessing operations for JPEG2000 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "pic_preprocessing.h" //#include <cutil.h> //#include <cutil_inline.h> #include <helper_cuda.h> #include <helper_timer.h> //sets pointers, but doesn't copy any data //TODO: Free pic->original_d on device at the end of processing void tiling (struct Picture *pic, struct Bitmap *img, int cb_dim) { //struct Picture *pic = NULL; struct Tile *tile = NULL; int tilesize,size, chan; int tile_numberx, tile_numbery,tile_number_total, tileIdx; int xpos, ypos, xstep, ystep; int xDim =img->xDim, yDim =img->yDim; if(cb_dim == 0) { //set size automatically //set codeblock size and according tilesize // => gain more speed with small pictures if(xDim <= 256 && yDim <= 256) { //700^2, tiles 256 pic->cb_xdim_exp = 4; //16x16 } else if(xDim <= 512 && yDim <= 512) { //1500^2, tiles 512 pic->cb_xdim_exp = 5; //32x32 } else { pic->cb_xdim_exp = 6; //64x64 } tilesize = 1024; } else { //user-defined codeblock size switch(cb_dim) { case 16: pic->cb_xdim_exp=4; tilesize = 256; break; case 32: pic->cb_xdim_exp=5; tilesize = 512; break; default: //64 pic->cb_xdim_exp=6; tilesize = 1024; } } pic->cb_ydim_exp = pic->cb_xdim_exp; pic->tilesize = tilesize; xstep = tilesize; ystep = tilesize; /*Calculation of tile number to allocate memory*/ tile_numberx = (xDim + (tilesize - 1))/tilesize; tile_numbery = (yDim + (tilesize - 1))/tilesize; tile_number_total = tile_numberx * tile_numbery; if(tile_number_total > pic->tile_number_alloc) { //printf("alloc tiles\n"); free(pic->tiles); tile = (struct Tile*) malloc (tile_number_total * sizeof (struct Tile)); pic->tiles = tile; pic->tile_number_alloc = tile_number_total; } else tile = pic->tiles; int *imgData_d[3]; size = 3* xDim*yDim*sizeof(int); if(xDim*yDim > pic->area_alloc) { //printf("tiling: free old dev.mem\n"); checkCudaErrors(hipFree(pic->device_mem)); //printf("tiling: malloc dev.mem\n"); checkCudaErrors(hipMalloc((void **) &imgData_d[0], size)); pic->device_mem = (void*)imgData_d[0]; pic->area_alloc = xDim*yDim; } else imgData_d[0] = (int*)(pic->device_mem); imgData_d[1]= &(imgData_d[0][xDim*yDim]); imgData_d[2]= &(imgData_d[1][xDim*yDim]); pic->xSize = img->xDim; pic->ySize = img->yDim; pic->channels = 
img->channels; //used to malloc memory on device for original data //Transfering data from original image to container //pic = (struct Picture*) malloc (sizeof (struct Picture)); //checkCudaErrors(hipMalloc((void **) &imgData_d[0], size)); //work variables xpos=0; ypos=0; tileIdx = 0; /*move (xpos/ypos) from tile to tile (always points at top left corner)*/ for (ypos=0; ypos<yDim; ypos +=ystep){ /*step can not go over image boundaries*/ if ((ypos+tilesize) > yDim) ystep = yDim - ypos; else ystep = tilesize; for (xpos=0; xpos<xDim; xpos +=xstep){ /*step can not go over image boundaries*/ if ((xpos+tilesize) > xDim) xstep = xDim - xpos; else xstep = tilesize; //transfering all the tile data to current tile tile[tileIdx].xDim = xstep; tile[tileIdx].yDim = ystep; tile[tileIdx].xPos = xpos; tile[tileIdx].yPos = ypos; tile[tileIdx].channels = img->channels; tile[tileIdx].QS = 0x22; //standard set to irreversible //setting pointer to original data array for (chan=0;chan < pic->channels;chan++){ tile[tileIdx].imgData_d[chan] = &(imgData_d[chan][xpos+ypos*xDim]); } pic->tile_number = tileIdx + 1; //min 1 tile tileIdx++; } } //return pic; } void tiling2 (struct Picture *pic, struct simpleTIFF *img, int cb_dim) { //struct Picture *pic = NULL; struct Tile *tile = NULL; int tilesize,size, chan; int tile_numberx, tile_numbery,tile_number_total, tileIdx; int xpos, ypos, xstep, ystep; int xDim =img->xDim, yDim =img->yDim; if(cb_dim == 0) { //set size automatically //set codeblock size and according tilesize // => gain more speed with small pictures if(xDim <= 256 && yDim <= 256) { //700^2, tiles 256 pic->cb_xdim_exp = 4; //16x16 } else if(xDim <= 512 && yDim <= 512) { //1500^2, tiles 512 pic->cb_xdim_exp = 5; //32x32 } else { pic->cb_xdim_exp = 6; //64x64 } tilesize = 1024; } else { //user-defined codeblock size switch(cb_dim) { case 16: pic->cb_xdim_exp=4; tilesize = 256; break; case 32: pic->cb_xdim_exp=5; tilesize = 512; break; default: //64 pic->cb_xdim_exp=6; tilesize = 1024; } } pic->cb_ydim_exp = pic->cb_xdim_exp; pic->tilesize = tilesize; xstep = tilesize; ystep = tilesize; /*Calculation of tile number to allocate memory*/ tile_numberx = (xDim + (tilesize - 1))/tilesize; tile_numbery = (yDim + (tilesize - 1))/tilesize; tile_number_total = tile_numberx * tile_numbery; if(tile_number_total > pic->tile_number_alloc) { //printf("alloc tiles\n"); free(pic->tiles); tile = (struct Tile*) malloc (tile_number_total * sizeof (struct Tile)); pic->tiles = tile; pic->tile_number_alloc = tile_number_total; } else tile = pic->tiles; int *imgData_d[3]; size = 3* xDim*yDim*sizeof(int); if(xDim*yDim > pic->area_alloc) { //printf("tiling: free old dev.mem\n"); checkCudaErrors(hipFree(pic->device_mem)); //printf("tiling: malloc dev.mem\n"); checkCudaErrors(hipMalloc((void **) &imgData_d[0], size)); pic->device_mem = (void*)imgData_d[0]; pic->area_alloc = xDim*yDim; } else imgData_d[0] = (int*)(pic->device_mem); imgData_d[1]= &(imgData_d[0][xDim*yDim]); imgData_d[2]= &(imgData_d[1][xDim*yDim]); pic->xSize = img->xDim; pic->ySize = img->yDim; pic->channels = img->channels; //used to malloc memory on device for original data //Transfering data from original image to container //pic = (struct Picture*) malloc (sizeof (struct Picture)); //checkCudaErrors(hipMalloc((void **) &imgData_d[0], size)); //work variables xpos=0; ypos=0; tileIdx = 0; /*move (xpos/ypos) from tile to tile (always points at top left corner)*/ for (ypos=0; ypos<yDim; ypos +=ystep){ /*step can not go over image boundaries*/ if ((ypos+tilesize) > yDim) 
ystep = yDim - ypos; else ystep = tilesize; for (xpos=0; xpos<xDim; xpos +=xstep){ /*step can not go over image boundaries*/ if ((xpos+tilesize) > xDim) xstep = xDim - xpos; else xstep = tilesize; //transfering all the tile data to current tile tile[tileIdx].xDim = xstep; tile[tileIdx].yDim = ystep; tile[tileIdx].xPos = xpos; tile[tileIdx].yPos = ypos; tile[tileIdx].channels = img->channels; tile[tileIdx].QS = 0x22; //standard set to irreversible //setting pointer to original data array for (chan=0;chan < pic->channels;chan++){ tile[tileIdx].imgData_d[chan] = &(imgData_d[chan][xpos+ypos*xDim]); } pic->tile_number = tileIdx + 1; //min 1 tile tileIdx++; } } //return pic; } #define MAX_BLOCKS 65000 //Kernel for dcshift + reversible color transform __global__ void rct_kernel(int *imgData_0,int *imgData_1,int *imgData_2, int range, int shift, int tile) { int n = threadIdx.x + blockIdx.x*256 + tile*MAX_BLOCKS*256; //proceeding linewise if (n<range){ //more threads than pixels, therefore check if in range int Y,U,V; //DC-Shift imgData_0[n]= (int)((int)imgData_0[n] - shift); imgData_1[n]= (int)((int)imgData_1[n] - shift); imgData_2[n]= (int)((int)imgData_2[n] - shift); /*RCT: R=imgData_i_0[n] G=imgData_i_1[n] B=imgData_i_2[n] */ Y = (imgData_0[n] + 2*imgData_1[n] + imgData_2[n])>>2; //little tweak, instead of floor(../4) U = imgData_2[n] - imgData_1[n]; V = imgData_0[n] - imgData_1[n]; imgData_0[n] = Y; imgData_1[n] = U; imgData_2[n] = V; } } //Kernel for dcshift + irreversible color transform __global__ void ict_kernel(int *imgData_0,int *imgData_1,int *imgData_2, int range, int tile) { int n = threadIdx.x + blockIdx.x*256 + tile*MAX_BLOCKS*256; //proceeding linewise if (n<range){ //more threads than pixels, therefore check if in range float Y,C_r,C_b; float *imgData_f_0; float *imgData_f_1; float *imgData_f_2; //DC-Shift imgData_0[n]= (int) ((int)imgData_0[n] - 128); imgData_1[n]= (int) ((int)imgData_1[n] - 128); imgData_2[n]= (int) ((int)imgData_2[n] - 128); imgData_f_0 = (float*)imgData_0; imgData_f_1 = (float*)imgData_1; imgData_f_2 = (float*)imgData_2; /*ICT: R=imgData_f_0[n] G=imgData_f_1[n] B=imgData_f_2[n] */ Y = (0.299f*(float)imgData_0[n] + 0.587f*(float)imgData_1[n] + 0.114f*(float)imgData_2[n]); C_r = (-0.16875f*(float)imgData_0[n] - 0.33126f*(float)imgData_1[n] + 0.5f*(float)imgData_2[n]); C_b = (0.5f*(float)imgData_0[n] + (-0.41869f*(float)imgData_1[n]) - 0.08131f*(float)imgData_2[n]); imgData_f_0[n] = Y; imgData_f_1[n] = C_r; imgData_f_2[n] = C_b; } } void dcshift_mct (struct Picture *pic, int mode, int bps, hipStream_t stream){ int pixels = pic->ySize * pic->xSize; int rangecheck = pic->ySize * pic->xSize; int blockmultiple; int gridDim; int processed_per_kernel; //pointers for original data int *imgData_0 = (int*) pic->tiles[0].imgData_d[0]; int *imgData_1 = (int*) pic->tiles[0].imgData_d[1]; int *imgData_2 = (int*) pic->tiles[0].imgData_d[2]; int shift = (int)(pow(2.0, (float)bps)); shift = shift /2; for(int tileIdx=0;pixels > 0;tileIdx++){ blockmultiple = (int) ceil((double)pixels/(double)(256*MAX_BLOCKS)); if (blockmultiple > 1) gridDim = MAX_BLOCKS; else gridDim=(int) ceil((double)pixels/256); processed_per_kernel = gridDim*256; //processed sequentially //kernel dimensions dim3 dimGrid(gridDim); dim3 dimBlock(256); //256 Threads for best gpu occupancy, compare cuda occupancy calculator //kernel calls if (mode == LOSSLESS) hipLaunchKernelGGL(( rct_kernel), dim3(dimGrid), dim3(dimBlock),0, stream , imgData_0,imgData_1,imgData_2,rangecheck,shift,tileIdx); else 
hipLaunchKernelGGL(( ict_kernel), dim3(dimGrid), dim3(dimBlock),0, stream , imgData_0,imgData_1,imgData_2,rangecheck,tileIdx); pixels -= processed_per_kernel; } }
88220fdc1a5240eace969517756256291bb828a3.cu
/* CUJ2K - JPEG2000 Encoder on CUDA http://cuj2k.sourceforge.net/ Copyright (c) 2009 Norbert Fuerst, Martin Heide, Armin Weiss, Simon Papandreou, Ana Balevic Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Picture preprocessing operations for JPEG2000 */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "pic_preprocessing.h" //#include <cutil.h> //#include <cutil_inline.h> #include <helper_cuda.h> #include <helper_timer.h> //sets pointers, but doesn't copy any data //TODO: Free pic->original_d on device at the end of processing void tiling (struct Picture *pic, struct Bitmap *img, int cb_dim) { //struct Picture *pic = NULL; struct Tile *tile = NULL; int tilesize,size, chan; int tile_numberx, tile_numbery,tile_number_total, tileIdx; int xpos, ypos, xstep, ystep; int xDim =img->xDim, yDim =img->yDim; if(cb_dim == 0) { //set size automatically //set codeblock size and according tilesize // => gain more speed with small pictures if(xDim <= 256 && yDim <= 256) { //700^2, tiles 256 pic->cb_xdim_exp = 4; //16x16 } else if(xDim <= 512 && yDim <= 512) { //1500^2, tiles 512 pic->cb_xdim_exp = 5; //32x32 } else { pic->cb_xdim_exp = 6; //64x64 } tilesize = 1024; } else { //user-defined codeblock size switch(cb_dim) { case 16: pic->cb_xdim_exp=4; tilesize = 256; break; case 32: pic->cb_xdim_exp=5; tilesize = 512; break; default: //64 pic->cb_xdim_exp=6; tilesize = 1024; } } pic->cb_ydim_exp = pic->cb_xdim_exp; pic->tilesize = tilesize; xstep = tilesize; ystep = tilesize; /*Calculation of tile number to allocate memory*/ tile_numberx = (xDim + (tilesize - 1))/tilesize; tile_numbery = (yDim + (tilesize - 1))/tilesize; tile_number_total = tile_numberx * tile_numbery; if(tile_number_total > pic->tile_number_alloc) { //printf("alloc tiles\n"); free(pic->tiles); tile = (struct Tile*) malloc (tile_number_total * sizeof (struct Tile)); pic->tiles = tile; pic->tile_number_alloc = tile_number_total; } else tile = pic->tiles; int *imgData_d[3]; size = 3* xDim*yDim*sizeof(int); if(xDim*yDim > pic->area_alloc) { //printf("tiling: free old dev.mem\n"); checkCudaErrors(cudaFree(pic->device_mem)); //printf("tiling: malloc dev.mem\n"); checkCudaErrors(cudaMalloc((void **) &imgData_d[0], size)); pic->device_mem = (void*)imgData_d[0]; pic->area_alloc = xDim*yDim; } else imgData_d[0] = (int*)(pic->device_mem); imgData_d[1]= &(imgData_d[0][xDim*yDim]); imgData_d[2]= &(imgData_d[1][xDim*yDim]); pic->xSize = img->xDim; pic->ySize = img->yDim; pic->channels = img->channels; //used to malloc memory on device for original data //Transfering data from 
original image to container //pic = (struct Picture*) malloc (sizeof (struct Picture)); //checkCudaErrors(cudaMalloc((void **) &imgData_d[0], size)); //work variables xpos=0; ypos=0; tileIdx = 0; /*move (xpos/ypos) from tile to tile (always points at top left corner)*/ for (ypos=0; ypos<yDim; ypos +=ystep){ /*step can not go over image boundaries*/ if ((ypos+tilesize) > yDim) ystep = yDim - ypos; else ystep = tilesize; for (xpos=0; xpos<xDim; xpos +=xstep){ /*step can not go over image boundaries*/ if ((xpos+tilesize) > xDim) xstep = xDim - xpos; else xstep = tilesize; //transfering all the tile data to current tile tile[tileIdx].xDim = xstep; tile[tileIdx].yDim = ystep; tile[tileIdx].xPos = xpos; tile[tileIdx].yPos = ypos; tile[tileIdx].channels = img->channels; tile[tileIdx].QS = 0x22; //standard set to irreversible //setting pointer to original data array for (chan=0;chan < pic->channels;chan++){ tile[tileIdx].imgData_d[chan] = &(imgData_d[chan][xpos+ypos*xDim]); } pic->tile_number = tileIdx + 1; //min 1 tile tileIdx++; } } //return pic; } void tiling2 (struct Picture *pic, struct simpleTIFF *img, int cb_dim) { //struct Picture *pic = NULL; struct Tile *tile = NULL; int tilesize,size, chan; int tile_numberx, tile_numbery,tile_number_total, tileIdx; int xpos, ypos, xstep, ystep; int xDim =img->xDim, yDim =img->yDim; if(cb_dim == 0) { //set size automatically //set codeblock size and according tilesize // => gain more speed with small pictures if(xDim <= 256 && yDim <= 256) { //700^2, tiles 256 pic->cb_xdim_exp = 4; //16x16 } else if(xDim <= 512 && yDim <= 512) { //1500^2, tiles 512 pic->cb_xdim_exp = 5; //32x32 } else { pic->cb_xdim_exp = 6; //64x64 } tilesize = 1024; } else { //user-defined codeblock size switch(cb_dim) { case 16: pic->cb_xdim_exp=4; tilesize = 256; break; case 32: pic->cb_xdim_exp=5; tilesize = 512; break; default: //64 pic->cb_xdim_exp=6; tilesize = 1024; } } pic->cb_ydim_exp = pic->cb_xdim_exp; pic->tilesize = tilesize; xstep = tilesize; ystep = tilesize; /*Calculation of tile number to allocate memory*/ tile_numberx = (xDim + (tilesize - 1))/tilesize; tile_numbery = (yDim + (tilesize - 1))/tilesize; tile_number_total = tile_numberx * tile_numbery; if(tile_number_total > pic->tile_number_alloc) { //printf("alloc tiles\n"); free(pic->tiles); tile = (struct Tile*) malloc (tile_number_total * sizeof (struct Tile)); pic->tiles = tile; pic->tile_number_alloc = tile_number_total; } else tile = pic->tiles; int *imgData_d[3]; size = 3* xDim*yDim*sizeof(int); if(xDim*yDim > pic->area_alloc) { //printf("tiling: free old dev.mem\n"); checkCudaErrors(cudaFree(pic->device_mem)); //printf("tiling: malloc dev.mem\n"); checkCudaErrors(cudaMalloc((void **) &imgData_d[0], size)); pic->device_mem = (void*)imgData_d[0]; pic->area_alloc = xDim*yDim; } else imgData_d[0] = (int*)(pic->device_mem); imgData_d[1]= &(imgData_d[0][xDim*yDim]); imgData_d[2]= &(imgData_d[1][xDim*yDim]); pic->xSize = img->xDim; pic->ySize = img->yDim; pic->channels = img->channels; //used to malloc memory on device for original data //Transfering data from original image to container //pic = (struct Picture*) malloc (sizeof (struct Picture)); //checkCudaErrors(cudaMalloc((void **) &imgData_d[0], size)); //work variables xpos=0; ypos=0; tileIdx = 0; /*move (xpos/ypos) from tile to tile (always points at top left corner)*/ for (ypos=0; ypos<yDim; ypos +=ystep){ /*step can not go over image boundaries*/ if ((ypos+tilesize) > yDim) ystep = yDim - ypos; else ystep = tilesize; for (xpos=0; xpos<xDim; xpos +=xstep){ 
/*step can not go over image boundaries*/ if ((xpos+tilesize) > xDim) xstep = xDim - xpos; else xstep = tilesize; //transfering all the tile data to current tile tile[tileIdx].xDim = xstep; tile[tileIdx].yDim = ystep; tile[tileIdx].xPos = xpos; tile[tileIdx].yPos = ypos; tile[tileIdx].channels = img->channels; tile[tileIdx].QS = 0x22; //standard set to irreversible //setting pointer to original data array for (chan=0;chan < pic->channels;chan++){ tile[tileIdx].imgData_d[chan] = &(imgData_d[chan][xpos+ypos*xDim]); } pic->tile_number = tileIdx + 1; //min 1 tile tileIdx++; } } //return pic; } #define MAX_BLOCKS 65000 //Kernel for dcshift + reversible color transform __global__ void rct_kernel(int *imgData_0,int *imgData_1,int *imgData_2, int range, int shift, int tile) { int n = threadIdx.x + blockIdx.x*256 + tile*MAX_BLOCKS*256; //proceeding linewise if (n<range){ //more threads than pixels, therefore check if in range int Y,U,V; //DC-Shift imgData_0[n]= (int)((int)imgData_0[n] - shift); imgData_1[n]= (int)((int)imgData_1[n] - shift); imgData_2[n]= (int)((int)imgData_2[n] - shift); /*RCT: R=imgData_i_0[n] G=imgData_i_1[n] B=imgData_i_2[n] */ Y = (imgData_0[n] + 2*imgData_1[n] + imgData_2[n])>>2; //little tweak, instead of floor(../4) U = imgData_2[n] - imgData_1[n]; V = imgData_0[n] - imgData_1[n]; imgData_0[n] = Y; imgData_1[n] = U; imgData_2[n] = V; } } //Kernel for dcshift + irreversible color transform __global__ void ict_kernel(int *imgData_0,int *imgData_1,int *imgData_2, int range, int tile) { int n = threadIdx.x + blockIdx.x*256 + tile*MAX_BLOCKS*256; //proceeding linewise if (n<range){ //more threads than pixels, therefore check if in range float Y,C_r,C_b; float *imgData_f_0; float *imgData_f_1; float *imgData_f_2; //DC-Shift imgData_0[n]= (int) ((int)imgData_0[n] - 128); imgData_1[n]= (int) ((int)imgData_1[n] - 128); imgData_2[n]= (int) ((int)imgData_2[n] - 128); imgData_f_0 = (float*)imgData_0; imgData_f_1 = (float*)imgData_1; imgData_f_2 = (float*)imgData_2; /*ICT: R=imgData_f_0[n] G=imgData_f_1[n] B=imgData_f_2[n] */ Y = (0.299f*(float)imgData_0[n] + 0.587f*(float)imgData_1[n] + 0.114f*(float)imgData_2[n]); C_r = (-0.16875f*(float)imgData_0[n] - 0.33126f*(float)imgData_1[n] + 0.5f*(float)imgData_2[n]); C_b = (0.5f*(float)imgData_0[n] + (-0.41869f*(float)imgData_1[n]) - 0.08131f*(float)imgData_2[n]); imgData_f_0[n] = Y; imgData_f_1[n] = C_r; imgData_f_2[n] = C_b; } } void dcshift_mct (struct Picture *pic, int mode, int bps, cudaStream_t stream){ int pixels = pic->ySize * pic->xSize; int rangecheck = pic->ySize * pic->xSize; int blockmultiple; int gridDim; int processed_per_kernel; //pointers for original data int *imgData_0 = (int*) pic->tiles[0].imgData_d[0]; int *imgData_1 = (int*) pic->tiles[0].imgData_d[1]; int *imgData_2 = (int*) pic->tiles[0].imgData_d[2]; int shift = (int)(pow(2.0, (float)bps)); shift = shift /2; for(int tileIdx=0;pixels > 0;tileIdx++){ blockmultiple = (int) ceil((double)pixels/(double)(256*MAX_BLOCKS)); if (blockmultiple > 1) gridDim = MAX_BLOCKS; else gridDim=(int) ceil((double)pixels/256); processed_per_kernel = gridDim*256; //processed sequentially //kernel dimensions dim3 dimGrid(gridDim); dim3 dimBlock(256); //256 Threads for best gpu occupancy, compare cuda occupancy calculator //kernel calls if (mode == LOSSLESS) rct_kernel<<< dimGrid, dimBlock,0, stream >>>(imgData_0,imgData_1,imgData_2,rangecheck,shift,tileIdx); else ict_kernel<<< dimGrid, dimBlock,0, stream >>>(imgData_0,imgData_1,imgData_2,rangecheck,tileIdx); pixels -= processed_per_kernel; } 
}
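The dcshift_mct() host loop above launches its kernels in chunks of at most MAX_BLOCKS blocks and passes a tile offset, because a 1D grid was historically capped at 65535 blocks. An alternative sketch (not CUJ2K code) using a grid-stride loop, shown for the DC shift only:

__global__ void dcshift_grid_stride(int* data, int range, int shift) {
    for (int n = blockIdx.x * blockDim.x + threadIdx.x;
         n < range;
         n += gridDim.x * blockDim.x) {
        data[n] -= shift;   // one launch covers any range
    }
}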
c59ba63592a9cbd4cb92e60e135b230737829014.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "update_part_props.cuh" #include "fill.cuh" #include <catboost/libs/cuda_wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> namespace NKernel { template <int BlockSize, int N = 1> __forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) { float4 sum; sum.x = sum.y = sum.z = sum.w = 0; stat += offset; const int warpSize = 32; const int alignSize = 4 * warpSize; { int lastId = min(size, alignSize - (offset % alignSize)); if (blockIdx == 0) { if (threadIdx.x < lastId) { sum.x += Ldg(stat + threadIdx.x); } } size = max(size - lastId, 0); stat += lastId; } //now lets align end const int unalignedTail = (size % alignSize); if (unalignedTail != 0) { if (blockIdx == 0) { const int tailOffset = size - unalignedTail; if (threadIdx.x < unalignedTail) { sum.y += Ldg(stat + tailOffset + threadIdx.x); } } } size -= unalignedTail; const int entriesPerWarp = warpSize * 4; const int warpsPerBlock = (BlockSize / 32); const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32); stat += globalWarpId * entriesPerWarp; size = max(size - globalWarpId * entriesPerWarp, 0); const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount; const int localIdx = (threadIdx.x & 31) * 4; const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize; stat += localIdx; if (size > 0) { #pragma unroll N for (int i = 0; i < iterCount; ++i) { const float4* stat4 = (const float4*) stat; float4 val = Ldg(stat4); sum.x += val.x; sum.y += val.y; sum.z += val.z; sum.w += val.w; stat += stripeSize; } } return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w; }; template <class TOutput> __global__ void SaveResultsImpl(const ui32* partIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = partIds != nullptr ? 
partIds[y] : y; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += tempVars[i]; tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets, const float* source, ui64 statLineSize, ui32 partCount, double* statPartSums) { ui32 partId = blockIdx.y; const ui32 statId = blockIdx.z; source += statId * statLineSize; while (partId < partCount) { const ui32 partOffset = __ldg(offsets + partId); const ui32 partSize = __ldg(offsets + partId + 1) - partOffset; __shared__ volatile double localBuffer[BlockSize]; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, partOffset, partSize, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { const int statCount = gridDim.z; const int lineSize = statCount * partCount; ui64 idx = blockIdx.x * lineSize + partId * statCount + statId; statPartSums[idx] = result; } partId += gridDim.y; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsImpl(const ui32* partIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 leafId = partIds[blockIdx.y]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum <BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } void UpdatePartitionsProps(const TDataPartition* parts, const ui32* partIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, partIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, partIds, tempVars, partCount, statCount, numBlocks.x, statSums); } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForSplitImpl(const ui32* leftPartIds, const ui32* rightPartIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 sourcePartCount = gridDim.y / 2; const bool isLeft = 
blockIdx.y < sourcePartCount; const ui32* partIds = isLeft ? leftPartIds : rightPartIds; const ui32 leafId = partIds[isLeft ? blockIdx.y : blockIdx.y - sourcePartCount]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForSingleSplitImpl(const ui32 leftPartId, const ui32 rightPartId, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { // const ui32 sourcePartCount = 2; const bool isLeft = blockIdx.y == 0; const ui32 leafId = isLeft ? leftPartId : rightPartId; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } template <class TOutput> __global__ void SaveResultsForSplitImpl(const ui32* leftPartIds, const ui32* rightPartIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = (y < partCount / 2) ? leftPartIds[y] : rightPartIds[y - partCount / 2]; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += __ldg(tempVars + i); tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <class TOutput> __global__ void SaveResultsForSingleSplitImpl(const ui32 leftPartId, const ui32 rightPartId, const double* tempVars, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < 2 * statCount) { const ui32 leafId = y == 0 ? 
leftPartId : rightPartId; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += __ldg(tempVars + i); tempVars += statCount * 2; } statSums[leafId * statCount + statId] = total; } } void UpdatePartitionsPropsForSplit(const TDataPartition* parts, const ui32* leftPartIds, const ui32* rightPartIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = 2 * partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsForSplitImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, leftPartIds, rightPartIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsForSplitImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, leftPartIds, rightPartIds, tempVars, 2 * partCount, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForSingleSplit(const TDataPartition* parts, const ui32 leftPartId, const ui32 rightPartId, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = 2; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsForSingleSplitImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, leftPartId, rightPartId, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsForSingleSplitImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, leftPartId, rightPartId, tempVars, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = min(count, 65535); numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); hipLaunchKernelGGL(( UpdatePartitionsPropsForOffsetsImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, offsets, source, statLineSize, count, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize; hipLaunchKernelGGL(( SaveResultsImpl), dim3(numSaveBlocks), dim3(saveBlockSize), 0, stream, nullptr, tempVars, count, statCount, numBlocks.x, statSums); } } __global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { WriteThrough(dst + i, (double)__ldg(src + i)); } } void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) { const ui32 blockSize = 128; const ui32 numBlocks = CeilDivide(size, blockSize); if (numBlocks) { hipLaunchKernelGGL(( FloatToDoubleImpl), dim3(numBlocks), dim3(blockSize), 0, stream, src, size, dst); } } ui32 
GetTempVarsCount(ui32 statCount, ui32 count) { return CeilDivide(2 * TArchProps::SMCount(), (int)statCount) * statCount * count; } }
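The .cu original of this file follows; the systematic difference between each .hip/.cu pair in this dump is the kernel-launch syntax, which hipify rewrites from CUDA's triple-chevron form into hipLaunchKernelGGL with explicit grid, block, shared-memory and stream arguments. A minimal side-by-side sketch of that mapping (the kernel and all names here are illustrative, not taken from the files in this dump):

#include <hip/hip_runtime.h>

__global__ void ScaleKernel(float* data, float factor, int size) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        data[i] *= factor;
    }
}

void ScaleOnStream(float* deviceData, float factor, int size, hipStream_t stream) {
    const int blockSize = 256;
    const int numBlocks = (size + blockSize - 1) / blockSize;
    // CUDA form, as written in the .cu originals:
    //   ScaleKernel<<<numBlocks, blockSize, 0, stream>>>(deviceData, factor, size);
    // HIP form produced by hipify, as written in the .hip files:
    hipLaunchKernelGGL(ScaleKernel, dim3(numBlocks), dim3(blockSize), 0, stream,
                       deviceData, factor, size);
}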
c59ba63592a9cbd4cb92e60e135b230737829014.cu
#include "update_part_props.cuh" #include "fill.cuh" #include <catboost/libs/cuda_wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <catboost/cuda/cuda_util/gpu_data/partitions.h> namespace NKernel { template <int BlockSize, int N = 1> __forceinline__ __device__ double ComputeSum(const float* __restrict__ stat, ui32 offset, ui32 size, int blockIdx, int blockCount) { float4 sum; sum.x = sum.y = sum.z = sum.w = 0; stat += offset; const int warpSize = 32; const int alignSize = 4 * warpSize; { int lastId = min(size, alignSize - (offset % alignSize)); if (blockIdx == 0) { if (threadIdx.x < lastId) { sum.x += Ldg(stat + threadIdx.x); } } size = max(size - lastId, 0); stat += lastId; } //now lets align end const int unalignedTail = (size % alignSize); if (unalignedTail != 0) { if (blockIdx == 0) { const int tailOffset = size - unalignedTail; if (threadIdx.x < unalignedTail) { sum.y += Ldg(stat + tailOffset + threadIdx.x); } } } size -= unalignedTail; const int entriesPerWarp = warpSize * 4; const int warpsPerBlock = (BlockSize / 32); const int globalWarpId = (blockIdx * warpsPerBlock) + (threadIdx.x / 32); stat += globalWarpId * entriesPerWarp; size = max(size - globalWarpId * entriesPerWarp, 0); const int stripeSize = entriesPerWarp * warpsPerBlock * blockCount; const int localIdx = (threadIdx.x & 31) * 4; const int iterCount = (size - localIdx + stripeSize - 1) / stripeSize; stat += localIdx; if (size > 0) { #pragma unroll N for (int i = 0; i < iterCount; ++i) { const float4* stat4 = (const float4*) stat; float4 val = Ldg(stat4); sum.x += val.x; sum.y += val.y; sum.z += val.z; sum.w += val.w; stat += stripeSize; } } return (double)sum.x + (double)sum.y + (double)sum.z + (double)sum.w; }; template <class TOutput> __global__ void SaveResultsImpl(const ui32* partIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = partIds != nullptr ? 
partIds[y] : y; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += tempVars[i]; tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForOffsetsImpl(const ui32* offsets, const float* source, ui64 statLineSize, ui32 partCount, double* statPartSums) { ui32 partId = blockIdx.y; const ui32 statId = blockIdx.z; source += statId * statLineSize; while (partId < partCount) { const ui32 partOffset = __ldg(offsets + partId); const ui32 partSize = __ldg(offsets + partId + 1) - partOffset; __shared__ volatile double localBuffer[BlockSize]; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (partSize + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum < BlockSize > (source, partOffset, partSize, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { const int statCount = gridDim.z; const int lineSize = statCount * partCount; ui64 idx = blockIdx.x * lineSize + partId * statCount + statId; statPartSums[idx] = result; } partId += gridDim.y; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsImpl(const ui32* partIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 leafId = partIds[blockIdx.y]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize * 16; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum <BlockSize>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } void UpdatePartitionsProps(const TDataPartition* parts, const ui32* partIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(partIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(partIds, tempVars, partCount, statCount, numBlocks.x, statSums); } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForSplitImpl(const ui32* leftPartIds, const ui32* rightPartIds, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { const ui32 sourcePartCount = gridDim.y / 2; const bool isLeft = blockIdx.y < sourcePartCount; const ui32* partIds = isLeft ? 
leftPartIds : rightPartIds; const ui32 leafId = partIds[isLeft ? blockIdx.y : blockIdx.y - sourcePartCount]; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } template <int BlockSize> __launch_bounds__(BlockSize, 2) __global__ void UpdatePartitionsPropsForSingleSplitImpl(const ui32 leftPartId, const ui32 rightPartId, const TDataPartition* parts, const float* source, ui64 statLineSize, double* tempVars) { // const ui32 sourcePartCount = 2; const bool isLeft = blockIdx.y == 0; const ui32 leafId = isLeft ? leftPartId : rightPartId; TDataPartition part = parts[leafId]; const ui32 statId = blockIdx.z; __shared__ volatile double localBuffer[BlockSize]; source += statId * statLineSize; const int minDocsPerBlock = BlockSize; const int effectiveBlockCount = min(gridDim.x, (part.Size + minDocsPerBlock - 1) / minDocsPerBlock); double result = 0; if (blockIdx.x < effectiveBlockCount) { const int blockId = blockIdx.x % effectiveBlockCount; localBuffer[threadIdx.x] = ComputeSum<BlockSize, 4>(source, part.Offset, part.Size, blockId, effectiveBlockCount); __syncthreads(); result = FastInBlockReduce(threadIdx.x, localBuffer, BlockSize); } if (threadIdx.x == 0) { tempVars[gridDim.z * gridDim.y * blockIdx.x + blockIdx.y * gridDim.z + blockIdx.z] = result; } } template <class TOutput> __global__ void SaveResultsForSplitImpl(const ui32* leftPartIds, const ui32* rightPartIds, const double* tempVars, ui32 partCount, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < partCount * statCount) { const ui32 leafId = (y < partCount / 2) ? leftPartIds[y] : rightPartIds[y - partCount / 2]; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += __ldg(tempVars + i); tempVars += statCount * partCount; } statSums[leafId * statCount + statId] = total; } } template <class TOutput> __global__ void SaveResultsForSingleSplitImpl(const ui32 leftPartId, const ui32 rightPartId, const double* tempVars, ui32 statCount, int tempVarsBlockCount, TOutput* statSums) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; const ui32 statId = i % statCount; const ui32 y = i / statCount; if (i < 2 * statCount) { const ui32 leafId = y == 0 ? 
leftPartId : rightPartId; double total = 0; for (int x = 0; x < tempVarsBlockCount; ++x) { total += __ldg(tempVars + i); tempVars += statCount * 2; } statSums[leafId * statCount + statId] = total; } } void UpdatePartitionsPropsForSplit(const TDataPartition* parts, const ui32* leftPartIds, const ui32* rightPartIds, ui32 partCount, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = 2 * partCount; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsForSplitImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(leftPartIds, rightPartIds, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; SaveResultsForSplitImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(leftPartIds, rightPartIds, tempVars, 2 * partCount, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForSingleSplit(const TDataPartition* parts, const ui32 leftPartId, const ui32 rightPartId, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = 2; numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsForSingleSplitImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(leftPartId, rightPartId, parts, source, statLineSize, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (numBlocks.y * numBlocks.z + saveBlockSize - 1) / saveBlockSize; SaveResultsForSingleSplitImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(leftPartId, rightPartId, tempVars, statCount, numBlocks.x, statSums); } } void UpdatePartitionsPropsForOffsets(const ui32* offsets, ui32 count, const float* source, ui32 statCount, ui64 statLineSize, ui32 tempVarsCount, double* tempVars, double* statSums, TCudaStream stream ) { const ui32 blockSize = 512; dim3 numBlocks; numBlocks.y = min(count, 65535); numBlocks.z = statCount; numBlocks.x = CeilDivide(2 * TArchProps::SMCount(), (int)statCount); Y_VERIFY(numBlocks.x * numBlocks.y * numBlocks.z <= tempVarsCount); UpdatePartitionsPropsForOffsetsImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(offsets, source, statLineSize, count, tempVars); { const ui32 saveBlockSize = 256; const ui32 numSaveBlocks = (count * statCount + saveBlockSize - 1) / saveBlockSize; SaveResultsImpl<<<numSaveBlocks, saveBlockSize, 0, stream>>>(nullptr, tempVars, count, statCount, numBlocks.x, statSums); } } __global__ void FloatToDoubleImpl(const float* src, ui32 size, double* dst) { const ui32 i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { WriteThrough(dst + i, (double)__ldg(src + i)); } } void CopyFloatToDouble(const float* src, ui32 size, double* dst, TCudaStream stream) { const ui32 blockSize = 128; const ui32 numBlocks = CeilDivide(size, blockSize); if (numBlocks) { FloatToDoubleImpl<<<numBlocks, blockSize, 0, stream>>>(src, size, dst); } } ui32 GetTempVarsCount(ui32 statCount, ui32 count) { return CeilDivide(2 * TArchProps::SMCount(), (int)statCount) * statCount * count; } }
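Both copies of this file implement the same two-pass reduction: the UpdatePartitionsProps* kernels write one partial sum per (block, partition, stat) cell into tempVars, and the SaveResults* kernels then fold the gridDim.x partials of each cell into statSums, which is why GetTempVarsCount multiplies the ceil-divided block count by statCount and the partition count. A stripped-down sketch of that two-pass pattern under simplified, hypothetical names, with a single segment axis standing in for the (partition, stat) pair and a fixed 256-thread block:

#include <cuda_runtime.h>

constexpr int kThreads = 256;   // launch both kernels with this block size

// Stage 1: one partial sum per (block, segment) cell of the scratch buffer.
__global__ void PartialSumsKernel(const float* values, const int* segmentOffsets,
                                  int segmentCount, double* partials) {
    __shared__ double shared[kThreads];
    const int segment = blockIdx.y;
    const int begin = segmentOffsets[segment];
    const int end = segmentOffsets[segment + 1];

    double sum = 0;
    for (int i = begin + blockIdx.x * blockDim.x + threadIdx.x; i < end;
         i += blockDim.x * gridDim.x) {
        sum += values[i];
    }
    shared[threadIdx.x] = sum;
    __syncthreads();

    // Tree reduction inside the block (blockDim.x must be a power of two).
    for (int offset = blockDim.x / 2; offset > 0; offset /= 2) {
        if (threadIdx.x < offset) {
            shared[threadIdx.x] += shared[threadIdx.x + offset];
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        partials[blockIdx.x * segmentCount + segment] = shared[0];
    }
}

// Stage 2: fold the gridDim.x partials of each segment, as SaveResultsImpl does.
__global__ void FoldPartialsKernel(const double* partials, int blockCountX,
                                   int segmentCount, double* sums) {
    const int segment = blockIdx.x * blockDim.x + threadIdx.x;
    if (segment < segmentCount) {
        double total = 0;
        for (int b = 0; b < blockCountX; ++b) {
            total += partials[b * segmentCount + segment];
        }
        sums[segment] = total;
    }
}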
806f816cbf1d666a21584ddb5b09a03228d8c28d.hip
// !!! This is a file automatically generated by hipify!!! #include <cv.h> #include <highgui.h> #include <math.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <iostream> #include <string> #include <hip/hip_runtime.h> using namespace cv; #define RED 2 #define GREEN 1 #define BLUE 0 #define MASK_WIDTH_M 3 #define MASK_WIDTH_S 5 #define TILE_SIZE 32 //tamaño de las submatrices __constant__ char d_M[MASK_WIDTH_M*MASK_WIDTH_M]; __constant__ char d_Mt[MASK_WIDTH_M*MASK_WIDTH_M]; __constant__ char d_S[MASK_WIDTH_S*MASK_WIDTH_S]; __device__ unsigned char clamp(int value) { if(value < 0) value = 0; else if(value > 255) value = 255; return (unsigned char)value; } __global__ void img2gray(unsigned char *imgOutput, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ imgOutput[row * width + col] = imgInput[(row * width + col) * 3 + RED] * 0.299 + imgInput[(row * width + col) * 3 + GREEN] * 0.587 + imgInput[(row * width + col) * 3 + BLUE] * 0.114; } } __global__ void gauss(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); if((row < height) && (col < width)){ for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += imgInput[(N_start_point_row + i)*width+(N_start_point_col + j)] * d_S[i*maskWidth+j]; } } } imgOutput[row * width + col] = clamp(Pvalue/159); } } __global__ void sobelGradX(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { __shared__ float N_ds[TILE_SIZE][TILE_SIZE]; //se establecen la submatriz y queda en memoria compartida int y = blockIdx.y * TILE_SIZE + threadIdx.y; int x = blockIdx.x * TILE_SIZE + threadIdx.x; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * TILE_SIZE + ty; int col = bx * TILE_SIZE + tx; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); for(int m = 0; m < col / TILE_SIZE; m=m+1){ N_ds[ty][tx] = imgInput[row*width + m*TILE_SIZE + tx]; __syncthreads(); for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += N_ds[N_start_point_row + i][N_start_point_col + j] * d_M[i*maskWidth+j]; } } } if (y < height && x < width) imgOutput[(y * width + x)] = clamp(Pvalue); __syncthreads(); } } __global__ void sobelGradY(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); if((row < height) && (col < width)){ for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += imgInput[(N_start_point_row + i)*width+(N_start_point_col + j)] * d_Mt[i*maskWidth+j]; } } } imgOutput[row * width + 
col] = clamp(Pvalue); } } __global__ void sobelFilter(unsigned char *imgSobel, unsigned char *sobelOutputX, unsigned char *sobelOutputY, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ imgSobel[row * width + col] = __powf((__powf(sobelOutputX[row * width + col],2) + __powf(sobelOutputY[row * width + col],2)), 0.5) ; } } __global__ void NoSupreMax(int width, int height, unsigned char *imgSobel,unsigned char *nosupmax){ int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ if((imgSobel[row * width + col-1] < imgSobel[row * width + col]) && (imgSobel[row * width + col]< imgSobel[row * width + col+1])){ nosupmax[row * width + col] = imgSobel[row * width + col]; }else{ nosupmax[row * width + col] = 0; } } } int main(int argc, char **argv) { clock_t start, end; double cpu_time_used; char *imageName = argv[1]; char h_M[] = {-1,0,1,-2,0,2,-1,0,1}; char h_Mt[] = {-1,-2,-1,0,0,0,1,2,1}; char h_S[] = {2,4,5,4,2,4,9,12,9,4,5,12,15,12,5,4,9,12,9,4,2,4,5,4,2}; unsigned char *h_dataRawImage, *h_imgOutput, *h_imgSobel, *h_suavizada, *h_nosupmax; unsigned char *d_dataRawImage, *d_imgOutput, *d_imgSobel, *d_suavizada, *d_nosupmax, *d_sobelOutputX, *d_sobelOutputY; Mat image; image = imread(imageName, 1); Size img_size = image.size(); int width = img_size.width; int height = img_size.height; int size = sizeof(unsigned char) * width * height * image.channels(); int sizeGray = sizeof(unsigned char) * width * height; h_dataRawImage = (unsigned char*)malloc(size); h_imgOutput = (unsigned char*)malloc(sizeGray); h_suavizada = (unsigned char*)malloc(sizeGray); h_imgSobel = (unsigned char*)malloc(sizeGray); h_nosupmax = (unsigned char*)malloc(sizeGray); hipMalloc((void**)&d_dataRawImage,size); hipMalloc((void**)&d_imgOutput,sizeGray); hipMalloc((void**)&d_suavizada,sizeGray); hipMalloc((void**)&d_imgSobel,sizeGray); hipMalloc((void**)&d_nosupmax,sizeGray); hipMalloc((void**)&d_sobelOutputX,sizeGray); hipMalloc((void**)&d_sobelOutputY,sizeGray); hipMalloc((void**)&d_M,sizeof(char)*9); hipMalloc((void**)&d_Mt,sizeof(char)*9); hipMalloc((void**)&d_S,sizeof(char)*25); h_dataRawImage = image.data; start = clock(); hipMemcpy(d_dataRawImage ,h_dataRawImage ,size, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_M,h_M,sizeof(char)*MASK_WIDTH_M*MASK_WIDTH_M); hipMemcpyToSymbol(d_Mt,h_Mt,sizeof(char)*MASK_WIDTH_M*MASK_WIDTH_M); hipMemcpyToSymbol(d_S,h_S,sizeof(char)*MASK_WIDTH_S*MASK_WIDTH_S); int blockSize = 32; dim3 dimBlock(blockSize,blockSize,1); dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1); //Escala de Grises hipLaunchKernelGGL(( img2gray), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imgOutput, d_dataRawImage, width, height); hipDeviceSynchronize(); //Suavizado hipLaunchKernelGGL(( gauss), dim3(dimGrid),dim3(dimBlock), 0, 0, d_suavizada, MASK_WIDTH_S, d_imgOutput, width, height); // Gradient X hipLaunchKernelGGL(( sobelGradX), dim3(dimGrid),dim3(dimBlock), 0, 0, d_sobelOutputX, MASK_WIDTH_M, d_suavizada, width, height); // Gradient Y hipLaunchKernelGGL(( sobelGradY), dim3(dimGrid),dim3(dimBlock), 0, 0, d_sobelOutputY, MASK_WIDTH_M, d_suavizada, width, height); // Gradient Magnitude hipLaunchKernelGGL(( sobelFilter), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imgSobel, d_sobelOutputX, d_sobelOutputY, width, height); //Supresión Máxima hipLaunchKernelGGL(( NoSupreMax), dim3(dimGrid),dim3(dimBlock), 0, 0, 
width,height,d_imgSobel,d_nosupmax); hipMemcpy(h_imgOutput,d_imgOutput,sizeGray,hipMemcpyDeviceToHost); hipMemcpy(h_suavizada,d_suavizada,sizeGray,hipMemcpyDeviceToHost); hipMemcpy(h_imgSobel,d_imgSobel,sizeGray,hipMemcpyDeviceToHost); hipMemcpy(h_nosupmax,d_nosupmax,sizeGray,hipMemcpyDeviceToHost); end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("%.10f\n",cpu_time_used); Mat gray_image; gray_image.create(height,width,CV_8UC1); gray_image.data = h_imgOutput; Mat suav_image; suav_image.create(height,width,CV_8UC1); suav_image.data = h_suavizada; Mat sobel_image; sobel_image.create(height,width,CV_8UC1); sobel_image.data = h_imgSobel; Mat nosupmax_image; nosupmax_image.create(height,width,CV_8UC1); nosupmax_image.data = h_nosupmax; namedWindow(imageName, WINDOW_NORMAL); namedWindow("Gray Image", WINDOW_NORMAL); namedWindow("Gray Image Suavizada", WINDOW_NORMAL); namedWindow("Sobel Image", WINDOW_NORMAL); namedWindow("No Supesion Image", WINDOW_NORMAL); imshow(imageName,image); imshow("Gray Image", gray_image); imshow("Gray Image Suavizada", suav_image); imshow("Sobel Image", sobel_image); imshow("No Supesion Image", nosupmax_image); waitKey(0); hipFree(d_dataRawImage); hipFree(d_imgOutput); hipFree(d_imgSobel); hipFree(d_suavizada); hipFree(d_nosupmax); hipFree(d_sobelOutputX); hipFree(d_sobelOutputY); hipFree(d_M); hipFree(d_Mt); hipFree(d_S); return 0; }
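The convolution masks in this file live in __constant__ device arrays and are populated with hipMemcpyToSymbol before the kernels run; because __constant__ arrays are statically allocated, the extra hipMalloc calls on d_M, d_Mt and d_S are not required for that copy to work. A self-contained sketch of the constant-memory mask pattern, using a hypothetical 1-D kernel and names that are not taken from the file:

#include <hip/hip_runtime.h>

#define MASK_WIDTH 3

// Statically allocated constant memory; hipMemcpyToSymbol fills it, no hipMalloc needed.
__constant__ char c_mask[MASK_WIDTH * MASK_WIDTH];

__global__ void ApplyMaskRow(const unsigned char* input, int width, int* output) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width) return;
    int value = 0;
    for (int k = 0; k < MASK_WIDTH; ++k) {          // only the first mask row, for brevity
        const int j = col - MASK_WIDTH / 2 + k;
        if (j >= 0 && j < width) {
            value += input[j] * c_mask[k];
        }
    }
    output[col] = value;
}

int main() {
    const char h_mask[MASK_WIDTH * MASK_WIDTH] = {-1, 0, 1, -2, 0, 2, -1, 0, 1};
    hipMemcpyToSymbol(c_mask, h_mask, sizeof(h_mask));

    const int width = 1024;
    unsigned char* d_in = nullptr;
    int* d_out = nullptr;
    hipMalloc((void**)&d_in, width * sizeof(unsigned char));
    hipMalloc((void**)&d_out, width * sizeof(int));
    hipMemset(d_in, 1, width * sizeof(unsigned char));

    const int threads = 256;
    const int blocks = (width + threads - 1) / threads;
    hipLaunchKernelGGL(ApplyMaskRow, dim3(blocks), dim3(threads), 0, 0, d_in, width, d_out);
    hipDeviceSynchronize();

    hipFree(d_in);
    hipFree(d_out);
    return 0;
}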
806f816cbf1d666a21584ddb5b09a03228d8c28d.cu
#include <cv.h> #include <highgui.h> #include <math.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <iostream> #include <string> #include <cuda.h> using namespace cv; #define RED 2 #define GREEN 1 #define BLUE 0 #define MASK_WIDTH_M 3 #define MASK_WIDTH_S 5 #define TILE_SIZE 32 //tamaño de las submatrices __constant__ char d_M[MASK_WIDTH_M*MASK_WIDTH_M]; __constant__ char d_Mt[MASK_WIDTH_M*MASK_WIDTH_M]; __constant__ char d_S[MASK_WIDTH_S*MASK_WIDTH_S]; __device__ unsigned char clamp(int value) { if(value < 0) value = 0; else if(value > 255) value = 255; return (unsigned char)value; } __global__ void img2gray(unsigned char *imgOutput, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ imgOutput[row * width + col] = imgInput[(row * width + col) * 3 + RED] * 0.299 + imgInput[(row * width + col) * 3 + GREEN] * 0.587 + imgInput[(row * width + col) * 3 + BLUE] * 0.114; } } __global__ void gauss(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); if((row < height) && (col < width)){ for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += imgInput[(N_start_point_row + i)*width+(N_start_point_col + j)] * d_S[i*maskWidth+j]; } } } imgOutput[row * width + col] = clamp(Pvalue/159); } } __global__ void sobelGradX(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { __shared__ float N_ds[TILE_SIZE][TILE_SIZE]; //se establecen la submatriz y queda en memoria compartida int y = blockIdx.y * TILE_SIZE + threadIdx.y; int x = blockIdx.x * TILE_SIZE + threadIdx.x; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int row = by * TILE_SIZE + ty; int col = bx * TILE_SIZE + tx; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); for(int m = 0; m < col / TILE_SIZE; m=m+1){ N_ds[ty][tx] = imgInput[row*width + m*TILE_SIZE + tx]; __syncthreads(); for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += N_ds[N_start_point_row + i][N_start_point_col + j] * d_M[i*maskWidth+j]; } } } if (y < height && x < width) imgOutput[(y * width + x)] = clamp(Pvalue); __syncthreads(); } } __global__ void sobelGradY(unsigned char *imgOutput, int maskWidth, unsigned char *imgInput, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; int Pvalue = 0; int N_start_point_row = row - (maskWidth/2); int N_start_point_col = col - (maskWidth/2); if((row < height) && (col < width)){ for(int i = 0; i < maskWidth; i++){ for(int j = 0; j < maskWidth; j++ ){ if((N_start_point_col + j >=0 && N_start_point_col + j < width) &&(N_start_point_row + i >=0 && N_start_point_row + i < height)){ Pvalue += imgInput[(N_start_point_row + i)*width+(N_start_point_col + j)] * d_Mt[i*maskWidth+j]; } } } imgOutput[row * width + col] = clamp(Pvalue); } } __global__ void sobelFilter(unsigned char 
*imgSobel, unsigned char *sobelOutputX, unsigned char *sobelOutputY, int width, int height) { int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ imgSobel[row * width + col] = __powf((__powf(sobelOutputX[row * width + col],2) + __powf(sobelOutputY[row * width + col],2)), 0.5) ; } } __global__ void NoSupreMax(int width, int height, unsigned char *imgSobel,unsigned char *nosupmax){ int row = blockIdx.y*blockDim.y+threadIdx.y; int col = blockIdx.x*blockDim.x+threadIdx.x; if((row < height) && (col < width)){ if((imgSobel[row * width + col-1] < imgSobel[row * width + col]) && (imgSobel[row * width + col]< imgSobel[row * width + col+1])){ nosupmax[row * width + col] = imgSobel[row * width + col]; }else{ nosupmax[row * width + col] = 0; } } } int main(int argc, char **argv) { clock_t start, end; double cpu_time_used; char *imageName = argv[1]; char h_M[] = {-1,0,1,-2,0,2,-1,0,1}; char h_Mt[] = {-1,-2,-1,0,0,0,1,2,1}; char h_S[] = {2,4,5,4,2,4,9,12,9,4,5,12,15,12,5,4,9,12,9,4,2,4,5,4,2}; unsigned char *h_dataRawImage, *h_imgOutput, *h_imgSobel, *h_suavizada, *h_nosupmax; unsigned char *d_dataRawImage, *d_imgOutput, *d_imgSobel, *d_suavizada, *d_nosupmax, *d_sobelOutputX, *d_sobelOutputY; Mat image; image = imread(imageName, 1); Size img_size = image.size(); int width = img_size.width; int height = img_size.height; int size = sizeof(unsigned char) * width * height * image.channels(); int sizeGray = sizeof(unsigned char) * width * height; h_dataRawImage = (unsigned char*)malloc(size); h_imgOutput = (unsigned char*)malloc(sizeGray); h_suavizada = (unsigned char*)malloc(sizeGray); h_imgSobel = (unsigned char*)malloc(sizeGray); h_nosupmax = (unsigned char*)malloc(sizeGray); cudaMalloc((void**)&d_dataRawImage,size); cudaMalloc((void**)&d_imgOutput,sizeGray); cudaMalloc((void**)&d_suavizada,sizeGray); cudaMalloc((void**)&d_imgSobel,sizeGray); cudaMalloc((void**)&d_nosupmax,sizeGray); cudaMalloc((void**)&d_sobelOutputX,sizeGray); cudaMalloc((void**)&d_sobelOutputY,sizeGray); cudaMalloc((void**)&d_M,sizeof(char)*9); cudaMalloc((void**)&d_Mt,sizeof(char)*9); cudaMalloc((void**)&d_S,sizeof(char)*25); h_dataRawImage = image.data; start = clock(); cudaMemcpy(d_dataRawImage ,h_dataRawImage ,size, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_M,h_M,sizeof(char)*MASK_WIDTH_M*MASK_WIDTH_M); cudaMemcpyToSymbol(d_Mt,h_Mt,sizeof(char)*MASK_WIDTH_M*MASK_WIDTH_M); cudaMemcpyToSymbol(d_S,h_S,sizeof(char)*MASK_WIDTH_S*MASK_WIDTH_S); int blockSize = 32; dim3 dimBlock(blockSize,blockSize,1); dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1); //Escala de Grises img2gray<<<dimGrid,dimBlock>>>(d_imgOutput, d_dataRawImage, width, height); cudaDeviceSynchronize(); //Suavizado gauss<<<dimGrid,dimBlock>>>(d_suavizada, MASK_WIDTH_S, d_imgOutput, width, height); // Gradient X sobelGradX<<<dimGrid,dimBlock>>>(d_sobelOutputX, MASK_WIDTH_M, d_suavizada, width, height); // Gradient Y sobelGradY<<<dimGrid,dimBlock>>>(d_sobelOutputY, MASK_WIDTH_M, d_suavizada, width, height); // Gradient Magnitude sobelFilter<<<dimGrid,dimBlock>>>(d_imgSobel, d_sobelOutputX, d_sobelOutputY, width, height); //Supresión Máxima NoSupreMax<<<dimGrid,dimBlock>>>(width,height,d_imgSobel,d_nosupmax); cudaMemcpy(h_imgOutput,d_imgOutput,sizeGray,cudaMemcpyDeviceToHost); cudaMemcpy(h_suavizada,d_suavizada,sizeGray,cudaMemcpyDeviceToHost); cudaMemcpy(h_imgSobel,d_imgSobel,sizeGray,cudaMemcpyDeviceToHost); 
cudaMemcpy(h_nosupmax,d_nosupmax,sizeGray,cudaMemcpyDeviceToHost); end = clock(); cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC; printf("%.10f\n",cpu_time_used); Mat gray_image; gray_image.create(height,width,CV_8UC1); gray_image.data = h_imgOutput; Mat suav_image; suav_image.create(height,width,CV_8UC1); suav_image.data = h_suavizada; Mat sobel_image; sobel_image.create(height,width,CV_8UC1); sobel_image.data = h_imgSobel; Mat nosupmax_image; nosupmax_image.create(height,width,CV_8UC1); nosupmax_image.data = h_nosupmax; namedWindow(imageName, WINDOW_NORMAL); namedWindow("Gray Image", WINDOW_NORMAL); namedWindow("Gray Image Suavizada", WINDOW_NORMAL); namedWindow("Sobel Image", WINDOW_NORMAL); namedWindow("No Supesion Image", WINDOW_NORMAL); imshow(imageName,image); imshow("Gray Image", gray_image); imshow("Gray Image Suavizada", suav_image); imshow("Sobel Image", sobel_image); imshow("No Supesion Image", nosupmax_image); waitKey(0); cudaFree(d_dataRawImage); cudaFree(d_imgOutput); cudaFree(d_imgSobel); cudaFree(d_suavizada); cudaFree(d_nosupmax); cudaFree(d_sobelOutputX); cudaFree(d_sobelOutputY); cudaFree(d_M); cudaFree(d_Mt); cudaFree(d_S); return 0; }
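The pipeline in this pair is: BGR-to-gray with the 0.299/0.587/0.114 luma weights, a 5x5 Gaussian blur whose mask entries sum to 159 (hence the division by 159), Sobel gradients in x and y, the magnitude sqrt(Gx^2 + Gy^2), and a simple horizontal non-maximum suppression. A small host-side reference for the per-pixel arithmetic, useful for spot-checking the GPU output; it is an illustration rather than a bit-exact replica, since the kernels rely on implicit float-to-uchar truncation and only clamp where clamp() is called:

#include <cmath>

// Luma weights matching img2gray; the OpenCV Mat stores pixels as BGR.
unsigned char GrayReference(unsigned char b, unsigned char g, unsigned char r) {
    return (unsigned char)(r * 0.299 + g * 0.587 + b * 0.114);
}

// 5x5 Gaussian with the same mask as h_S; 159 is the sum of the mask entries.
unsigned char GaussReference(const unsigned char window[5][5], const char mask[25]) {
    int acc = 0;
    for (int i = 0; i < 5; ++i) {
        for (int j = 0; j < 5; ++j) {
            acc += window[i][j] * mask[i * 5 + j];
        }
    }
    return (unsigned char)(acc / 159);
}

// Gradient magnitude from the two Sobel responses; the 255 cap is this reference's
// addition, while the sobelFilter kernel stores the raw converted value.
unsigned char MagnitudeReference(unsigned char gx, unsigned char gy) {
    const double m = std::sqrt((double)gx * gx + (double)gy * gy);
    return (unsigned char)(m > 255.0 ? 255.0 : m);
}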
e73caa2dca8620dcfdfb82230fd22284662c828b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Stan Tomov */ #include "magma_internal.h" #define PRECISION_z #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __global__ void zgeam_kernel_nn( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += ibx + tx + (iby + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // perform the operation on NX-by-NB subtile i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { C[j2*ldc] = alpha*A[j2*lda] +beta*B[j2*ldb]; } } } __syncthreads(); // move to next subtile A += NX; B += NX; C += NX; } } __global__ void zgeam_kernel_nc( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sB[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += iby + tx + (ibx + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from B into sB i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sB[ty + j2][tx] = B[j2*ldb]; } } } __syncthreads(); // save NB-by-NX subtile from A & sB into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = alpha*A[i2 + j2*lda] + beta*sB[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX*lda; B += NX; C += NX*ldc; } } __global__ void zgeam_kernel_cn( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += iby + tx + (ibx + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; 
#pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA & B into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = alpha*sA[tx + i2][ty + j2] + beta*B[i2 + j2*ldb]; } } } } __syncthreads(); // move to next subtile A += NX; B += NX*ldb; C += NX*ldc; } } __global__ void zgeam_kernel_cc( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A & B into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = alpha*A[j2*lda] + beta*B[j2*ldb]; } } } __syncthreads(); // save NB-by-NX subtile from sA into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; B += NX; C += NX*ldc; } } /***************************************************************************//** Purpose ------- zgeam adds/transposes matrices: C = alpha*op( A ) + beta*op( B ). The operation supports also the following in-place transformations C = alpha*C + beta*op( B ) C = alpha*op( A ) + beta*C Arguments --------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER The number of rows of the matrix op( dA ). M >= 0. @param[in] n INTEGER The number of columns of the matrix op( dA ). N >= 0. @param[in] alpha COMPLEX_16 On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX_16 array, dimension (LDDA,k), where k is N when transA = MagmaNoTrans, and is M otherwise. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N when transA = MagmaNoTrans, and LDDA >= M otherwise. @param[in] beta COMPLEX_16 On entry, BETA specifies the scalar beta. @param[in] dB COMPLEX_16 array, dimension (LDDB,k), where k is N when transB = MagmaNoTrans, and is M otherwise. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= N when transB = MagmaNoTrans, and LDDB >= M otherwise. @param[in,out] dC COMPLEX_16 array, dimension (LDDC,N). dC can be input, making the operation in-place, if dC is passed as one of the pointers to dA or dB. The M-by-N matrix dC. @param[in] lddc INTEGER The leading dimension of the array dC. LDDC >= M. 
@param[in] queue magma_queue_t Queue to execute in. @ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_zgeam( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex beta, magmaDoubleComplex_const_ptr dB, magma_int_t lddb, magmaDoubleComplex_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( dA == dC && transA != MagmaNoTrans ) info = -6; else if ( transA == MagmaNoTrans ? ldda < m : ldda < n ) info = -7; else if ( dB == dC && transB != MagmaNoTrans ) info = -9; else if ( transB == MagmaNoTrans ? lddb < m : lddb < n ) info = -10; else if ( lddc < m ) info = -12; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( n, NB ), magma_ceildiv( m, NB ) ); if ( MAGMA_Z_EQUAL( alpha, MAGMA_Z_ZERO ) && MAGMA_Z_EQUAL( beta, MAGMA_Z_ZERO ) ) // set to 0 magmablas_zlaset( MagmaFull, m, n, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dC, lddc, queue ); else if ( transA == MagmaNoTrans && transB == MagmaNoTrans ){ dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); hipLaunchKernelGGL(( zgeam_kernel_nn), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, dA, ldda, beta, dB, lddb, dC, lddc); } else if ( transA == MagmaNoTrans ) hipLaunchKernelGGL(( zgeam_kernel_nc), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); else if ( transB == MagmaNoTrans ) hipLaunchKernelGGL(( zgeam_kernel_cn), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); else hipLaunchKernelGGL(( zgeam_kernel_cc), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); }
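magmablas_zgeam above dispatches to one of four kernels according to transA/transB, launching the transposed variants with the (m, n) arguments swapped, and every kernel indexes column-major data through explicit leading dimensions. A much-simplified sketch of the no-transpose case only, one element per thread instead of the NB-by-NB tiling with NX-by-NY threads used above, and with float standing in for magmaDoubleComplex:

#include <hip/hip_runtime.h>

// C = alpha*A + beta*B on m-by-n column-major matrices (no-transpose case only).
__global__ void geam_nn_sketch(int m, int n, float alpha, const float* A, int lda,
                               float beta, const float* B, int ldb, float* C, int ldc) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < m && col < n) {
        C[row + col * ldc] = alpha * A[row + col * lda] + beta * B[row + col * ldb];
    }
}

void launch_geam_nn_sketch(int m, int n, float alpha, const float* dA, int lda,
                           float beta, const float* dB, int ldb, float* dC, int ldc,
                           hipStream_t stream) {
    dim3 threads(16, 8);                           // NX-by-NY shaped block, as above
    dim3 grid((m + threads.x - 1) / threads.x,     // ceil-divide, as magma_ceildiv does
              (n + threads.y - 1) / threads.y);
    hipLaunchKernelGGL(geam_nn_sketch, grid, threads, 0, stream,
                       m, n, alpha, dA, lda, beta, dB, ldb, dC, ldc);
}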
e73caa2dca8620dcfdfb82230fd22284662c828b.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> s d c @author Stan Tomov */ #include "magma_internal.h" #define PRECISION_z #if defined(PRECISION_z) #define NX 16 #else #define NX 32 #endif #define NB 32 #define NY 8 // tile M-by-N matrix with ceil(M/NB) by ceil(N/NB) tiles sized NB-by-NB. // uses NX-by-NY threads, where NB/NX, NB/NY, NX/NY evenly. // subtile each NB-by-NB tile with (NB/NX) subtiles sized NX-by-NB // for each subtile // load NX-by-NB subtile transposed from A into sA, as (NB/NY) blocks sized NX-by-NY // save NB-by-NX subtile from sA into AT, as (NB/NX)*(NX/NY) blocks sized NX-by-NY // A += NX // AT += NX*ldat // // e.g., with NB=32, NX=32, NY=8 ([sdc] precisions) // load 32x32 subtile as 4 blocks of 32x8 columns: (A11 A12 A13 A14 ) // save 32x32 subtile as 1*4 blocks of 32x8 columns: (AT11 AT12 AT13 AT14) // // e.g., with NB=32, NX=16, NY=8 (z precision) // load 16x32 subtile as 4 blocks of 16x8 columns: (A11 A12 A13 A14) // save 32x16 subtile as 2*2 blocks of 16x8 columns: (AT11 AT12) // (AT21 AT22) __global__ void zgeam_kernel_nn( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += ibx + tx + (iby + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // perform the operation on NX-by-NB subtile i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { C[j2*ldc] = alpha*A[j2*lda] +beta*B[j2*ldb]; } } } __syncthreads(); // move to next subtile A += NX; B += NX; C += NX; } } __global__ void zgeam_kernel_nc( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sB[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += iby + tx + (ibx + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from B into sB i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sB[ty + j2][tx] = B[j2*ldb]; } } } __syncthreads(); // save NB-by-NX subtile from A & sB into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = alpha*A[i2 + j2*lda] + beta*sB[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX*lda; B += NX; C += NX*ldc; } } __global__ void zgeam_kernel_cn( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += iby + tx + (ibx + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed 
from A into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = A[j2*lda]; } } } __syncthreads(); // save NB-by-NX subtile from sA & B into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = alpha*sA[tx + i2][ty + j2] + beta*B[i2 + j2*ldb]; } } } } __syncthreads(); // move to next subtile A += NX; B += NX*ldb; C += NX*ldc; } } __global__ void zgeam_kernel_cc( int m, int n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, int lda, magmaDoubleComplex beta, const magmaDoubleComplex *B, int ldb, magmaDoubleComplex *C, int ldc) { __shared__ magmaDoubleComplex sA[NB][NX+1]; int tx = threadIdx.x; int ty = threadIdx.y; int ibx = blockIdx.x*NB; int iby = blockIdx.y*NB; int i, j; A += ibx + tx + (iby + ty)*lda; B += ibx + tx + (iby + ty)*ldb; C += iby + tx + (ibx + ty)*ldc; #pragma unroll for( int tile=0; tile < NB/NX; ++tile ) { // load NX-by-NB subtile transposed from A & B into sA i = ibx + tx + tile*NX; j = iby + ty; if (i < m) { #pragma unroll for( int j2=0; j2 < NB; j2 += NY ) { if (j + j2 < n) { sA[ty + j2][tx] = alpha*A[j2*lda] + beta*B[j2*ldb]; } } } __syncthreads(); // save NB-by-NX subtile from sA into C i = iby + tx; j = ibx + ty + tile*NX; #pragma unroll for( int i2=0; i2 < NB; i2 += NX ) { if (i + i2 < n) { #pragma unroll for( int j2=0; j2 < NX; j2 += NY ) { if (j + j2 < m) { C[i2 + j2*ldc] = sA[tx + i2][ty + j2]; } } } } __syncthreads(); // move to next subtile A += NX; B += NX; C += NX*ldc; } } /***************************************************************************//** Purpose ------- zgeam adds/transposes matrices: C = alpha*op( A ) + beta*op( B ). The operation supports also the following in-place transformations C = alpha*C + beta*op( B ) C = alpha*op( A ) + beta*C Arguments --------- @param[in] transA magma_trans_t. On entry, transA specifies the form of op( A ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( A ) = A. - = MagmaTrans: op( A ) = A**T. - = MagmaConjTrans: op( A ) = A**H. @param[in] transB magma_trans_t. On entry, transB specifies the form of op( B ) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op( B ) = B. - = MagmaTrans: op( B ) = B**T. - = MagmaConjTrans: op( B ) = B**H. @param[in] m INTEGER The number of rows of the matrix op( dA ). M >= 0. @param[in] n INTEGER The number of columns of the matrix op( dA ). N >= 0. @param[in] alpha COMPLEX_16 On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX_16 array, dimension (LDDA,k), where k is N when transA = MagmaNoTrans, and is M otherwise. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N when transA = MagmaNoTrans, and LDDA >= M otherwise. @param[in] beta COMPLEX_16 On entry, BETA specifies the scalar beta. @param[in] dB COMPLEX_16 array, dimension (LDDB,k), where k is N when transB = MagmaNoTrans, and is M otherwise. @param[in] lddb INTEGER The leading dimension of the array dB. LDDB >= N when transB = MagmaNoTrans, and LDDB >= M otherwise. @param[in,out] dC COMPLEX_16 array, dimension (LDDC,N). dC can be input, making the operation in-place, if dC is passed as one of the pointers to dA or dB. The M-by-N matrix dC. @param[in] lddc INTEGER The leading dimension of the array dC. LDDC >= M. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_transpose *******************************************************************************/ extern "C" void magmablas_zgeam( magma_trans_t transA, magma_trans_t transB, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex beta, magmaDoubleComplex_const_ptr dB, magma_int_t lddb, magmaDoubleComplex_ptr dC, magma_int_t lddc, magma_queue_t queue ) { magma_int_t info = 0; if ( transA != MagmaNoTrans && transA != MagmaTrans && transA != MagmaConjTrans ) info = -1; else if ( transB != MagmaNoTrans && transB != MagmaTrans && transB != MagmaConjTrans ) info = -2; else if ( m < 0 ) info = -3; else if ( n < 0 ) info = -4; else if ( dA == dC && transA != MagmaNoTrans ) info = -6; else if ( transA == MagmaNoTrans ? ldda < m : ldda < n ) info = -7; else if ( dB == dC && transB != MagmaNoTrans ) info = -9; else if ( transB == MagmaNoTrans ? lddb < m : lddb < n ) info = -10; else if ( lddc < m ) info = -12; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } /* Quick return */ if ( (m == 0) || (n == 0) ) return; dim3 threads( NX, NY ); dim3 grid( magma_ceildiv( n, NB ), magma_ceildiv( m, NB ) ); if ( MAGMA_Z_EQUAL( alpha, MAGMA_Z_ZERO ) && MAGMA_Z_EQUAL( beta, MAGMA_Z_ZERO ) ) // set to 0 magmablas_zlaset( MagmaFull, m, n, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dC, lddc, queue ); else if ( transA == MagmaNoTrans && transB == MagmaNoTrans ){ dim3 grid( magma_ceildiv( m, NB ), magma_ceildiv( n, NB ) ); zgeam_kernel_nn<<<grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, dA, ldda, beta, dB, lddb, dC, lddc); } else if ( transA == MagmaNoTrans ) zgeam_kernel_nc<<<grid, threads, 0, queue->cuda_stream() >>> (n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); else if ( transB == MagmaNoTrans ) zgeam_kernel_cn<<<grid, threads, 0, queue->cuda_stream() >>> (n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); else zgeam_kernel_cc<<<grid, threads, 0, queue->cuda_stream() >>> (n, m, alpha, dA, ldda, beta, dB, lddb, dC, lddc); }
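The transposing kernels above stage each subtile in shared memory declared as sA[NB][NX+1] (and sB likewise); the extra padding column keeps the threads that read the tile back along the other dimension from all hitting the same shared-memory bank. A self-contained CUDA sketch of that padding trick in a plain float transpose; the tile size, names and row-major layout here are illustrative rather than MAGMA's:

#include <cuda_runtime.h>

#define TILE_DIM 32
#define BLOCK_ROWS 8

// in: height x width, row-major; out: width x height, row-major, so out[c][r] = in[r][c].
__global__ void transpose_padded(const float* in, float* out, int width, int height) {
    __shared__ float tile[TILE_DIM][TILE_DIM + 1];   // +1 column avoids bank conflicts

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
        if (x < width && (y + j) < height) {
            tile[threadIdx.y + j][threadIdx.x] = in[(y + j) * width + x];
        }
    }
    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;         // swap block coordinates
    y = blockIdx.x * TILE_DIM + threadIdx.y;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
        if (x < height && (y + j) < width) {
            out[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
        }
    }
}

void TransposeOnDevice(const float* d_in, float* d_out, int width, int height,
                       cudaStream_t stream) {
    dim3 block(TILE_DIM, BLOCK_ROWS);
    dim3 grid((width + TILE_DIM - 1) / TILE_DIM, (height + TILE_DIM - 1) / TILE_DIM);
    transpose_padded<<<grid, block, 0, stream>>>(d_in, d_out, width, height);
}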
d57028a51d4b4738920b0f12eb6f2c8c3c55e31a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> s d c */ #include "common_magma.h" #define PRECISION_z #define hemv_bs 32 #define bank_shift 33 /******************************************************************************* * Lower case, where n is multiple of block size (hemv_bs) */ __global__ void zhemv_kernel_fermi_L_special_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; if ( blkc < my_gpu_id ) { return; } magmaDoubleComplex res = MAGMA_Z_ZERO; // used in scan the row magmaDoubleComplex res_ = MAGMA_Z_ZERO; // used in scan the column magmaDoubleComplex res1 = MAGMA_Z_ZERO; // tem for res magmaDoubleComplex res2 = MAGMA_Z_ZERO; // tem for res_ __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex sdata [hemv_bs][9]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += ty * lda + tx; if ( ty == 0 ) { buff[tx] = x[0]; if ( blkc == 0 && my_gpu_id == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff; int flag = 0; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4 + 4); i++) { if ( i < tx ) { la[0][bank_shift * tx + i] = cuConj( la[0][ i * bank_shift + tx] ); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; flag = 1; } x -= blkc * hemv_bs * incx; x = x - tx*incx; int wc_c = my_gpu_id; int count = 0; WC += break_d + tx; int num_blocks_iters = (blkc +1) /ngpu - flag; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { num_blocks_iters += 1; } x += (my_gpu_id) * hemv_bs; if ( blkc > my_gpu_id ) { for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; if ( my_gpu_id == 0 && tx < kstan && count == 1 ) { buff2[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } sdata[tx][ty] = res_; __syncthreads(); if ( ty == 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } 
/************************************************************** * Lower case for generic sizes */ __global__ void zhemv_kernel_fermi_L_generic_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int m_mod_nb, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; if ( blkc < my_gpu_id ) { return; } magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex res_ = MAGMA_Z_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; magmaDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex sdata [hemv_bs][9]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += lda * ty; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_nb ) { buff[tx] = MAGMA_Z_ZERO; } else buff[tx] = x[0]; } if ( tx > m_mod_nb ) trackA=m_mod_nb; else trackA=tx; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx; A += trackA; } if ( ty == 0 ) { if ( my_gpu_id == 0 && blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } int flag = 0; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_nb ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4+4); i++) { if ( i < tx ) { la[0][bank_shift*tx+i] = cuConj(la[0][i*bank_shift+tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; flag = 1; } __syncthreads(); x = x - break_d*incx; x = x - tx * incx; int wc_c = my_gpu_id; int count = 0; WC += break_d + tx; int num_blocks_iters = (blkc +1) /ngpu - flag; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { num_blocks_iters += 1; } x += (my_gpu_id) * hemv_bs; if ( blkc > my_gpu_id ) { for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; if ( my_gpu_id == 0 && tx < kstan && count == 1 ) { buff2[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } sdata[tx][ty] = res_; __syncthreads(); if ( ty == 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + 
la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } /************************************************************** * */ __global__ void zhemv_kernel_fermi_L_update_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan ) { #if (__CUDA_ARCH__ >= 200) int i; int tx = threadIdx.x; int ind = blockIdx.x * hemv_bs + tx; magmaDoubleComplex Ca; Ca = MAGMA_Z_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x * hemv_bs; i < n; i += hemv_bs) { Ca += WC[0]; WC += hemv_bs; } if ( ind < n && ind >= kstan ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; #endif /* (__CUDA_ARCH__ >= 200) */ } extern "C" void magmablas_zhemv_fermi_L_mgpu_offset_32( magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *dy, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t my_gpu_id, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; dA += ldda * num_blocks_skipped * nb + the_chosen_block_id * nb; dx += the_chosen_block_id * nb; dy += the_chosen_block_id * nb; magma_int_t blocks = (n - 1)/hemv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of hemv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % hemv_bs == 0 ) { hipLaunchKernelGGL(( zhemv_kernel_fermi_L_special_mgpu_offset_32), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } else { magma_int_t m_mod_nb = (n % hemv_bs) - 1; hipLaunchKernelGGL(( zhemv_kernel_fermi_L_generic_mgpu_offset_32), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, m_mod_nb, my_gpu_id, ngpu, nb, kstan); } hipLaunchKernelGGL(( zhemv_kernel_fermi_L_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } /******************************************************************************* * Upper case, where n is multiple of block size (hemv_bs) */ __global__ void zhemv_kernel_fermi_U_special_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; // used in scan the row magmaDoubleComplex res_ = MAGMA_Z_ZERO; // used in scan the column magmaDoubleComplex res1 = MAGMA_Z_ZERO; // tem for res magmaDoubleComplex res2 = MAGMA_Z_ZERO; // tem for res_ __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += ty * lda + tx; if ( ty == 0 ) { buff[tx] = x[0]; if ( blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * 
(blkc/ngpu) * hemv_bs; // change #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4 + 4); i++) { if ( i > tx ) { la[0][bank_shift * tx + i] = cuConj(la[0][ i * bank_shift + tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; } __syncthreads(); x -= (break_d + tx) * incx; // return to the beginning x += my_gpu_id * hemv_bs; int wc_c = my_gpu_id; int total_blocks_gpu = gridDim.x /ngpu; if ( my_gpu_id < (gridDim.x % ngpu) ) { total_blocks_gpu += 1; } int shift = (blkc +1) /ngpu; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { shift += 1; } #pragma unroll for(int s=0; s < shift; s++) { x += ngpu * hemv_bs; A += lda * hemv_bs; wc_c += ngpu; } WC += break_d + tx; int num_blocks_iters = total_blocks_gpu - shift; int count = 0; for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty] = res_; __syncthreads(); if ( ty == 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void zhemv_kernel_fermi_U_generic_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int m_mod_thread_x, int my_gpu_id, int ngpu, int nb, int kstan, int the_right_gpu) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex res_ = MAGMA_Z_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; magmaDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += lda * ty; int trackA; if ( blkc == (gridDim.x - 1) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_Z_ZERO; } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } A += tx; } if ( ty == 0 ) { if ( blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_MAKE( 9999, 0 
); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4+4); i++) { if ( i > tx ) { la[0][bank_shift * tx + i] = cuConj(la[0][ i * bank_shift + tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; } x -= (break_d + tx) * incx; // return to the beginning x += (my_gpu_id) * hemv_bs; // int wc_c = my_gpu_id; int total_blocks_gpu = gridDim.x /ngpu; if ( my_gpu_id < (gridDim.x % ngpu) ) { total_blocks_gpu += 1; } int shift = (blkc +1) /ngpu; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { shift += 1; } #pragma unroll for(int s=0; s < shift; s++) { x += ngpu * hemv_bs; A += lda * hemv_bs; wc_c += ngpu; } WC += break_d + tx; int num_blocks_iters = total_blocks_gpu - shift; int count = 0; for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; if ( my_gpu_id == the_right_gpu && s == num_blocks_iters-1 ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff2[tx] = MAGMA_Z_ZERO; } else buff2[tx] = x[tx]; } #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_ZERO; } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } __syncthreads(); } // end of the_right_gpu else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); } #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty] = res_; __syncthreads(); if ( ty == 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void zhemv_kernel_fermi_U_update_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan ) { #if (__CUDA_ARCH__ >= 200) int i; int tx = threadIdx.x; int ind = blockIdx.x * hemv_bs + tx; magmaDoubleComplex Ca; Ca = MAGMA_Z_ZERO; WC += blockIdx.x * lda + tx; for(i = 0; i < (blockIdx.x+1)*hemv_bs; i += hemv_bs) { Ca += WC[0]; WC += hemv_bs; } if ( ind < n && ind >= kstan ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; #endif /* (__CUDA_ARCH__ >= 200) */ } extern "C" void magmablas_zhemv_fermi_U_mgpu_offset_32( magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *dy, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t my_gpu_id, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_int_t 
num_blocks_skipped, magma_int_t the_right_gpu) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; dA += ldda * num_blocks_skipped * nb + the_chosen_block_id * nb; dx += the_chosen_block_id * nb; dy += the_chosen_block_id * nb; magma_int_t blocks = (n - 1)/hemv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of hemv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % hemv_bs == 0 ) { hipLaunchKernelGGL(( zhemv_kernel_fermi_U_special_mgpu_offset_32), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } else { magma_int_t m_mod_thread_x = (n % hemv_bs) - 1; hipLaunchKernelGGL(( zhemv_kernel_fermi_U_generic_mgpu_offset_32), dim3(grid), dim3(threads), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, m_mod_thread_x, my_gpu_id, ngpu, nb, kstan, the_right_gpu); } hipLaunchKernelGGL(( zhemv_kernel_fermi_U_update_mgpu_offset_32), dim3(grid), dim3(threads_u), 0, magma_stream , n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } /************************************************************************* Purpose ------- magmablas_zhemv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX*16 array of dimension ( LDDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. 
On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. */ extern "C" magma_int_t magmablas_zhemv_mgpu_32_offset( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_queue_t queues[][10]) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); magmablasSetKernelStream( queues[dev][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % ngpu; magma_int_t num_blocks_skipped = the_chosen_block_id / ngpu; if ( dev < the_chosen_gpu_id ) { num_blocks_skipped += 1; } int new_gpu_id = ( dev + ngpu - the_chosen_gpu_id ) % ngpu; magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; the_right_gpu = ( the_right_gpu + ngpu - the_chosen_gpu_id ) % ngpu; // the_right_gpu is used in Upper generic case. if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_zhemv2_mgpu_32_offset( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb, magma_int_t offset) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! 
upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); // magmablasSetKernelStream( queues[dev][0] ); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % ngpu; magma_int_t num_blocks_skipped = the_chosen_block_id / ngpu; if ( dev < the_chosen_gpu_id ) { num_blocks_skipped += 1; } int new_gpu_id = ( dev + ngpu - the_chosen_gpu_id ) % ngpu; magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; the_right_gpu = ( the_right_gpu + ngpu - the_chosen_gpu_id ) % ngpu; // the_right_gpu is used in Upper generic case. if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_zhemv2_mgpu_32( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; // the_right_gpu is used in Upper generic case. 
if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], dev, ngpu, nb, 0, 0, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], dev, ngpu, nb, 0, 0); } } return MAGMA_SUCCESS; }
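/*
 * Editorial note (illustrative sketch, not part of the MAGMA sources): the
 * hipified file above differs from the original .cu file that follows almost
 * only in the kernel-launch syntax.  The toy kernel below is hypothetical; it
 * just shows the mapping hipify applies to every launch in this file:
 *
 *     kernel<<< grid, threads, shmem, stream >>>(args...);               // CUDA
 *     hipLaunchKernelGGL(kernel, grid, threads, shmem, stream, args...); // HIP
 */
#include <hip/hip_runtime.h>

__global__ void toy_axpy(int n, double alpha, const double *x, double *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        y[i] = alpha * x[i] + y[i];   // y := alpha*x + y
}

static void launch_toy_axpy(int n, double alpha, const double *dx, double *dy,
                            hipStream_t stream)
{
    dim3 threads(256, 1, 1);
    dim3 grid((n + 255) / 256, 1, 1);
    // CUDA form: toy_axpy<<< grid, threads, 0, stream >>>(n, alpha, dx, dy);
    hipLaunchKernelGGL(toy_axpy, grid, threads, 0, stream, n, alpha, dx, dy);
}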
d57028a51d4b4738920b0f12eb6f2c8c3c55e31a.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> s d c */ #include "common_magma.h" #define PRECISION_z #define hemv_bs 32 #define bank_shift 33 /******************************************************************************* * Lower case, where n is multiple of block size (hemv_bs) */ __global__ void zhemv_kernel_fermi_L_special_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; if ( blkc < my_gpu_id ) { return; } magmaDoubleComplex res = MAGMA_Z_ZERO; // used in scan the row magmaDoubleComplex res_ = MAGMA_Z_ZERO; // used in scan the column magmaDoubleComplex res1 = MAGMA_Z_ZERO; // tem for res magmaDoubleComplex res2 = MAGMA_Z_ZERO; // tem for res_ __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex sdata [hemv_bs][9]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += ty * lda + tx; if ( ty == 0 ) { buff[tx] = x[0]; if ( blkc == 0 && my_gpu_id == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff; int flag = 0; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4 + 4); i++) { if ( i < tx ) { la[0][bank_shift * tx + i] = cuConj( la[0][ i * bank_shift + tx] ); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; flag = 1; } x -= blkc * hemv_bs * incx; x = x - tx*incx; int wc_c = my_gpu_id; int count = 0; WC += break_d + tx; int num_blocks_iters = (blkc +1) /ngpu - flag; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { num_blocks_iters += 1; } x += (my_gpu_id) * hemv_bs; if ( blkc > my_gpu_id ) { for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; if ( my_gpu_id == 0 && tx < kstan && count == 1 ) { buff2[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } sdata[tx][ty] = res_; __syncthreads(); if ( ty == 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } /************************************************************** * Lower case for generic sizes */ __global__ void 
zhemv_kernel_fermi_L_generic_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int m_mod_nb, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; if ( blkc < my_gpu_id ) { return; } magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex res_ = MAGMA_Z_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; magmaDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex sdata [hemv_bs][9]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += lda * ty; int trackA; if ( blkc == ( gridDim.x - 1 ) ) { if ( ty == 0 ) { if ( tx > m_mod_nb ) { buff[tx] = MAGMA_Z_ZERO; } else buff[tx] = x[0]; } if ( tx > m_mod_nb ) trackA=m_mod_nb; else trackA=tx; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } trackA = tx; A += trackA; } if ( ty == 0 ) { if ( my_gpu_id == 0 && blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } int flag = 0; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_nb ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4+4); i++) { if ( i < tx ) { la[0][bank_shift*tx+i] = cuConj(la[0][i*bank_shift+tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; flag = 1; } __syncthreads(); x = x - break_d*incx; x = x - tx * incx; int wc_c = my_gpu_id; int count = 0; WC += break_d + tx; int num_blocks_iters = (blkc +1) /ngpu - flag; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { num_blocks_iters += 1; } x += (my_gpu_id) * hemv_bs; if ( blkc > my_gpu_id ) { for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; if ( my_gpu_id == 0 && tx < kstan && count == 1 ) { buff2[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff2; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } sdata[tx][ty] = res_; __syncthreads(); if ( ty == 1 ) { res2 = sdata[tx][0]+sdata[tx][1] + sdata[tx][2]+sdata[tx][3] + sdata[tx][4]+sdata[tx][5] + sdata[tx][6]+sdata[tx][7]; WC[wc_c*lda ] = res2; } wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } 
/************************************************************** * */ __global__ void zhemv_kernel_fermi_L_update_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan ) { #if (__CUDA_ARCH__ >= 200) int i; int tx = threadIdx.x; int ind = blockIdx.x * hemv_bs + tx; magmaDoubleComplex Ca; Ca = MAGMA_Z_ZERO; WC += ind + lda * blockIdx.x; for(i = blockIdx.x * hemv_bs; i < n; i += hemv_bs) { Ca += WC[0]; WC += hemv_bs; } if ( ind < n && ind >= kstan ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; #endif /* (__CUDA_ARCH__ >= 200) */ } extern "C" void magmablas_zhemv_fermi_L_mgpu_offset_32( magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *dy, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t my_gpu_id, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; dA += ldda * num_blocks_skipped * nb + the_chosen_block_id * nb; dx += the_chosen_block_id * nb; dy += the_chosen_block_id * nb; magma_int_t blocks = (n - 1)/hemv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of hemv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % hemv_bs == 0 ) { zhemv_kernel_fermi_L_special_mgpu_offset_32<<< grid, threads, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } else { magma_int_t m_mod_nb = (n % hemv_bs) - 1; zhemv_kernel_fermi_L_generic_mgpu_offset_32<<< grid, threads, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, m_mod_nb, my_gpu_id, ngpu, nb, kstan); } zhemv_kernel_fermi_L_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } /******************************************************************************* * Upper case, where n is multiple of block size (hemv_bs) */ __global__ void zhemv_kernel_fermi_U_special_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; // used in scan the row magmaDoubleComplex res_ = MAGMA_Z_ZERO; // used in scan the column magmaDoubleComplex res1 = MAGMA_Z_ZERO; // tem for res magmaDoubleComplex res2 = MAGMA_Z_ZERO; // tem for res_ __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += ty * lda + tx; if ( ty == 0 ) { buff[tx] = x[0]; if ( blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } // obtain the vector x store in buff; if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty*4; i < (ty*4 + 4); i++) { if 
( i > tx ) { la[0][bank_shift * tx + i] = cuConj(la[0][ i * bank_shift + tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; } __syncthreads(); x -= (break_d + tx) * incx; // return to the beginning x += my_gpu_id * hemv_bs; int wc_c = my_gpu_id; int total_blocks_gpu = gridDim.x /ngpu; if ( my_gpu_id < (gridDim.x % ngpu) ) { total_blocks_gpu += 1; } int shift = (blkc +1) /ngpu; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { shift += 1; } #pragma unroll for(int s=0; s < shift; s++) { x += ngpu * hemv_bs; A += lda * hemv_bs; wc_c += ngpu; } WC += break_d + tx; int num_blocks_iters = total_blocks_gpu - shift; int count = 0; for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty] = res_; __syncthreads(); if ( ty == 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void zhemv_kernel_fermi_U_generic_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int m_mod_thread_x, int my_gpu_id, int ngpu, int nb, int kstan, int the_right_gpu) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; int ty = threadIdx.y; int blkc = blockIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex res_ = MAGMA_Z_ZERO; magmaDoubleComplex res1 = MAGMA_Z_ZERO; magmaDoubleComplex res2 = MAGMA_Z_ZERO; __shared__ magmaDoubleComplex la [hemv_bs][bank_shift]; __shared__ magmaDoubleComplex buff [hemv_bs]; __shared__ magmaDoubleComplex buff2 [hemv_bs]; int break_d = hemv_bs * blkc; x += (break_d + tx) * incx; A += break_d; A += lda * ty; int trackA; if ( blkc == (gridDim.x - 1) ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff[tx] = MAGMA_Z_ZERO; } else buff[tx] = x[0]; } if ( tx > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx; A += trackA; } else { if ( ty == 0 ) { buff[tx] = x[0]; } A += tx; } if ( ty == 0 ) { if ( blkc == 0 && tx < kstan ) { buff[tx] = MAGMA_Z_ZERO; } } if ( (blkc % ngpu) == my_gpu_id ) { A += lda * (blkc/ngpu) * hemv_bs; // change if ( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_MAKE( 9999, 0 ); } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) { la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int 
i=ty*4; i < (ty*4+4); i++) { if ( i > tx ) { la[0][bank_shift * tx + i] = cuConj(la[0][ i * bank_shift + tx]); } } __syncthreads(); #pragma unroll for(int j=0; j < 4; j++) res += cuConj(la[0][bank_shift*tx+j+ty*4])* buff[j+ty*4]; __syncthreads(); A -= lda * (blkc/ngpu) * hemv_bs; } x -= (break_d + tx) * incx; // return to the beginning x += (my_gpu_id) * hemv_bs; // int wc_c = my_gpu_id; int total_blocks_gpu = gridDim.x /ngpu; if ( my_gpu_id < (gridDim.x % ngpu) ) { total_blocks_gpu += 1; } int shift = (blkc +1) /ngpu; if ( my_gpu_id < ( (blkc+1) % ngpu) ) { shift += 1; } #pragma unroll for(int s=0; s < shift; s++) { x += ngpu * hemv_bs; A += lda * hemv_bs; wc_c += ngpu; } WC += break_d + tx; int num_blocks_iters = total_blocks_gpu - shift; int count = 0; for(int s=0; s < num_blocks_iters; s++) { res_ = MAGMA_Z_ZERO; count++; if ( my_gpu_id == the_right_gpu && s == num_blocks_iters-1 ) { if ( ty == 0 ) { if ( tx > m_mod_thread_x ) { buff2[tx] = MAGMA_Z_ZERO; } else buff2[tx] = x[tx]; } #pragma unroll for(int j=0; j < hemv_bs; j += 8) { if ( ( ty + j ) > m_mod_thread_x ) { la[0][bank_shift*(ty+j)+tx] = MAGMA_Z_ZERO; } else la[0][bank_shift*(ty+j)+tx] = A[ j * lda]; } __syncthreads(); } // end of the_right_gpu else { #pragma unroll for(int j=0; j < hemv_bs; j += 8) la[0][ bank_shift * (ty+j) + tx] = A[ j * lda]; if ( ty == 0 ) { buff2[tx] = x[tx]; } // obtain the vector x store in buff; __syncthreads(); } #pragma unroll for(int j=0; j < 4; j++) { res += (la[0][bank_shift * (ty + j * 8) + tx] )* buff2[ ty + j * 8]; res_ += cuConj( la[0][bank_shift * tx + j + ty*4] ) * buff[j + ty*4]; //iterate colum } __syncthreads(); la[0][bank_shift*tx+ty] = res_; __syncthreads(); if ( ty == 0 ) { res2 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[wc_c*lda ] = res2; } __syncthreads(); wc_c += ngpu; x += ngpu * hemv_bs; A += lda * hemv_bs; } la[0][bank_shift*tx+ty] = res; __syncthreads(); if ( ty == 0 ) { res1 = la[0][tx*bank_shift+0]+la[0][tx*bank_shift+1] + la[0][tx*bank_shift+2]+la[0][tx*bank_shift+3] + la[0][tx*bank_shift+4]+la[0][tx*bank_shift+5] + la[0][tx*bank_shift+6]+la[0][tx*bank_shift+7]; WC[0+lda*(blkc)] = res1; } #endif /* (__CUDA_ARCH__ >= 200) */ } __global__ void zhemv_kernel_fermi_U_update_mgpu_offset_32( int n, magmaDoubleComplex alpha, magmaDoubleComplex *A, int lda, magmaDoubleComplex *x, int incx, magmaDoubleComplex beta, magmaDoubleComplex *y, int incy, magmaDoubleComplex *WC, int my_gpu_id, int ngpu, int nb, int kstan ) { #if (__CUDA_ARCH__ >= 200) int i; int tx = threadIdx.x; int ind = blockIdx.x * hemv_bs + tx; magmaDoubleComplex Ca; Ca = MAGMA_Z_ZERO; WC += blockIdx.x * lda + tx; for(i = 0; i < (blockIdx.x+1)*hemv_bs; i += hemv_bs) { Ca += WC[0]; WC += hemv_bs; } if ( ind < n && ind >= kstan ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca; #endif /* (__CUDA_ARCH__ >= 200) */ } extern "C" void magmablas_zhemv_fermi_U_mgpu_offset_32( magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex *dA, magma_int_t ldda, magmaDoubleComplex *dx, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *dy, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t my_gpu_id, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_int_t num_blocks_skipped, magma_int_t the_right_gpu) { magma_int_t the_chosen_block_id = offset / nb; magma_int_t kstan = offset % nb; dA += ldda * num_blocks_skipped * nb + the_chosen_block_id * nb; dx += 
the_chosen_block_id * nb; dy += the_chosen_block_id * nb; magma_int_t blocks = (n - 1)/hemv_bs + 1; blocks -= the_chosen_block_id; dim3 grid(blocks, 1, 1); dim3 threads(nb, 8, 1); dim3 threads_u(nb, 1, 1); /* * If matrix size is multiple of hemv_bs, we use a specific code. * otherwise, we call the generic case. */ if ( n % hemv_bs == 0 ) { zhemv_kernel_fermi_U_special_mgpu_offset_32<<< grid, threads, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } else { magma_int_t m_mod_thread_x = (n % hemv_bs) - 1; zhemv_kernel_fermi_U_generic_mgpu_offset_32<<< grid, threads, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, m_mod_thread_x, my_gpu_id, ngpu, nb, kstan, the_right_gpu); } zhemv_kernel_fermi_U_update_mgpu_offset_32<<< grid, threads_u, 0, magma_stream >>>( n, alpha, dA, ldda, dx, incx, beta, dy, incy, dwork, my_gpu_id, ngpu, nb, kstan); } /************************************************************************* Purpose ------- magmablas_zhemv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX*16 array of dimension ( LDDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] ldda INTEGER. On entry, LDDA specifies the first dimension of A as declared in the calling (sub) program. LDDA must be at least max( 1, n ). It is recommended that ldda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] dx COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in,out] dy COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. 
*/ extern "C" magma_int_t magmablas_zhemv_mgpu_32_offset( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb, magma_int_t offset, magma_queue_t queues[][10]) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); magmablasSetKernelStream( queues[dev][0]); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % ngpu; magma_int_t num_blocks_skipped = the_chosen_block_id / ngpu; if ( dev < the_chosen_gpu_id ) { num_blocks_skipped += 1; } int new_gpu_id = ( dev + ngpu - the_chosen_gpu_id ) % ngpu; magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; the_right_gpu = ( the_right_gpu + ngpu - the_chosen_gpu_id ) % ngpu; // the_right_gpu is used in Upper generic case. if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_zhemv2_mgpu_32_offset( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb, magma_int_t offset) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. 
*/ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); // magmablasSetKernelStream( queues[dev][0] ); magma_int_t the_chosen_block_id = offset / nb; magma_int_t the_chosen_gpu_id = the_chosen_block_id % ngpu; magma_int_t num_blocks_skipped = the_chosen_block_id / ngpu; if ( dev < the_chosen_gpu_id ) { num_blocks_skipped += 1; } int new_gpu_id = ( dev + ngpu - the_chosen_gpu_id ) % ngpu; magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; the_right_gpu = ( the_right_gpu + ngpu - the_chosen_gpu_id ) % ngpu; // the_right_gpu is used in Upper generic case. if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], new_gpu_id, ngpu, nb, offset, num_blocks_skipped); } } return MAGMA_SUCCESS; } extern "C" magma_int_t magmablas_zhemv2_mgpu_32( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dA[], magma_int_t ldda, magmaDoubleComplex_ptr dx[], magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy[], magma_int_t incy, magmaDoubleComplex_ptr dwork[], magma_int_t lwork, magma_int_t ngpu, magma_int_t nb) { magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // no CUDA ARCH 1.x version fprintf( stderr, "%s not supported on CUDA arch 1.x\n", __func__ ); return MAGMA_ERR_NOT_SUPPORTED; } // -------------------- // CUDA ARCH 2.x (Fermi) version int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ if ( (! upper) && (uplo != MagmaLower) ) { return -1; } else if ( n < 0 ) { return -2; } else if ( ldda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return MAGMA_SUCCESS; magma_int_t blocks = (n - 1)/hemv_bs + 1; magma_int_t lwmin = ldda * (blocks + 1); if ( lwork < lwmin ) { fprintf( stderr, "Not enough work space in %s: passed %d, required %d\n", __func__, (int) lwork, (int) lwmin); return -12; } if ( nb != 32 ) { fprintf( stderr, "Error in %s: nb != 32, please reallocate matrix among GPUs\n", __func__ ); return MAGMA_ERR_ILLEGAL_VALUE; } magma_int_t dev; for(dev=0; dev < ngpu; dev++) { magma_setdevice( dev ); magma_int_t the_right_block_id = n / nb; magma_int_t the_right_gpu = the_right_block_id % ngpu; // the_right_gpu is used in Upper generic case. if ( upper ) { magmablas_zhemv_fermi_U_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], dev, ngpu, nb, 0, 0, the_right_gpu); } else { magmablas_zhemv_fermi_L_mgpu_offset_32( n, alpha, dA[dev], ldda, dx[dev], incx, beta, dy[dev], incy, dwork[dev], dev, ngpu, nb, 0, 0); } } return MAGMA_SUCCESS; }
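/*
 * Editorial note (illustrative sketch, assumptions flagged in comments): the
 * zhemv kernels above work in two phases.  The special/generic kernels write
 * per-block partial sums into the workspace WC (one column of length n per
 * block row, leading dimension lda), and the *_update kernels reduce those
 * columns and apply y := beta*y + alpha*sum.  The simplified double-precision
 * kernel below shows only that reduction/update pattern; it is not the actual
 * MAGMA update kernel.
 */
__global__ void partial_sum_update(int n, int nblocks, int ld,
                                   const double *WC,   // n x nblocks partial sums, leading dim ld
                                   double alpha, double beta, double *y)
{
    int ind = blockIdx.x * blockDim.x + threadIdx.x;
    if (ind >= n) return;

    double acc = 0.0;
    for (int b = 0; b < nblocks; ++b)      // reduce the partial sums for row `ind`
        acc += WC[ind + b * ld];

    y[ind] = beta * y[ind] + alpha * acc;  // same update form as the *_update kernels
}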
yolo_layer.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * yolo_layer.cu * * This code was originally written by wang-xinyu under MIT license. * I took it from: * * https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4 * * and made necessary modifications. * * - JK Jung */ #include "trt_yolo/yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(hipMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(hipMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), hipMemcpyHostToDevice)); assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } int YoloLayerPlugin::initialize() { return 0; } void YoloLayerPlugin::terminate() { CHECK(hipFree(mAnchors)); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType 
YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() { } const char* YoloLayerPlugin::getPluginType() const { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const { return "1"; } void YoloLayerPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). 
__global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; 
det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, hipStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(hipMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { hipLaunchKernelGGL(( CalDetection_NewCoords), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { hipLaunchKernelGGL(( CalDetection), dim3((num_elements + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, stream, inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, hipStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32); 
assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); } // namespace nvinfer1
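A note on the thread mapping in CalDetection above: each GPU thread handles one (grid cell, anchor) combination across the whole batch, exactly as the kernel comment states. The small host-side sketch below only restates the index arithmetic already present in the kernel; the struct and function names are hypothetical and not part of the plugin.

// Sketch: how CalDetection's linear thread index decomposes. Illustrative only.
struct YoloIndex { int batch; int anchor; int row; int col; };

static YoloIndex decodeYoloIndex(int idx, int yolo_width, int yolo_height, int num_anchors)
{
    const int total_grids = yolo_width * yolo_height;  // cells in one feature map
    const int cell  = idx % total_grids;               // position inside the grid
    const int group = idx / total_grids;               // (batch, anchor) pair
    YoloIndex out;
    out.batch  = group / num_anchors;
    out.anchor = group % num_anchors;
    out.row    = cell / yolo_width;
    out.col    = cell % yolo_width;
    return out;
}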
yolo_layer.cu
/* * yolo_layer.cu * * This code was originally written by wang-xinyu under MIT license. * I took it from: * * https://github.com/wang-xinyu/tensorrtx/tree/master/yolov4 * * and made necessary modifications. * * - JK Jung */ #include "trt_yolo/yolo_layer.h" using namespace Yolo; namespace { // Write values into buffer template <typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } // Read values from buffer template <typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } // namespace namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int yolo_width, int yolo_height, int num_anchors, float* anchors, int num_classes, int input_width, int input_height, float scale_x_y, int new_coords) { mYoloWidth = yolo_width; mYoloHeight = yolo_height; mNumAnchors = num_anchors; memcpy(mAnchorsHost, anchors, num_anchors * 2 * sizeof(float)); mNumClasses = num_classes; mInputWidth = input_width; mInputHeight = input_height; mScaleXY = scale_x_y; mNewCoords = new_coords; CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); } YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mThreadCount); read(d, mYoloWidth); read(d, mYoloHeight); read(d, mNumAnchors); memcpy(mAnchorsHost, d, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); read(d, mNumClasses); read(d, mInputWidth); read(d, mInputHeight); read(d, mScaleXY); read(d, mNewCoords); CHECK(cudaMalloc(&mAnchors, MAX_ANCHORS * 2 * sizeof(float))); CHECK(cudaMemcpy(mAnchors, mAnchorsHost, mNumAnchors * 2 * sizeof(float), cudaMemcpyHostToDevice)); assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const { char* d = static_cast<char*>(buffer), *a = d; write(d, mThreadCount); write(d, mYoloWidth); write(d, mYoloHeight); write(d, mNumAnchors); memcpy(d, mAnchorsHost, MAX_ANCHORS * 2 * sizeof(float)); d += MAX_ANCHORS * 2 * sizeof(float); write(d, mNumClasses); write(d, mInputWidth); write(d, mInputHeight); write(d, mScaleXY); write(d, mNewCoords); assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const { return sizeof(mThreadCount) + \ sizeof(mYoloWidth) + sizeof(mYoloHeight) + \ sizeof(mNumAnchors) + MAX_ANCHORS * 2 * sizeof(float) + \ sizeof(mNumClasses) + \ sizeof(mInputWidth) + sizeof(mInputHeight) + \ sizeof(mScaleXY) + sizeof(mNewCoords); } int YoloLayerPlugin::initialize() { return 0; } void YoloLayerPlugin::terminate() { CHECK(cudaFree(mAnchors)); } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) { assert(index == 0); assert(nbInputDims == 1); assert(inputs[0].d[0] == (mNumClasses + 5) * mNumAnchors); assert(inputs[0].d[1] == mYoloHeight); assert(inputs[0].d[2] == mYoloWidth); // output detection results to the channel dimension int totalsize = mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float); return Dims3(totalsize, 1, 1); } void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { return 
DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() { } const char* YoloLayerPlugin::getPluginType() const { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const { return "1"; } void YoloLayerPlugin::destroy() { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const { YoloLayerPlugin *p = new YoloLayerPlugin(mYoloWidth, mYoloHeight, mNumAnchors, (float*) mAnchorsHost, mNumClasses, mInputWidth, mInputHeight, mScaleXY, mNewCoords); p->setPluginNamespace(mPluginNamespace); return p; } inline __device__ float sigmoidGPU(float x) { return 1.0f / (1.0f + __expf(-x)); } inline __device__ float scale_sigmoidGPU(float x, float s) { return s * sigmoidGPU(x) - (s - 1.0f) * 0.5f; } // CalDetection(): This kernel processes 1 yolo layer calculation. It // distributes calculations so that 1 GPU thread would be responsible // for each grid/anchor combination. // NOTE: The output (x, y, w, h) are between 0.0 and 1.0 // (relative to orginal image width and height). 
__global__ void CalDetection(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_logit = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_logit) { max_cls_logit = l; class_id = i - 5; } } float max_cls_prob = sigmoidGPU(max_cls_logit); float box_prob = sigmoidGPU(*(cur_input + 4 * total_grids)); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale_sigmoidGPU(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale_sigmoidGPU(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = __expf(*(cur_input + 2 * total_grids)) * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = __expf(*(cur_input + 3 * total_grids)) * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } inline __device__ float scale(float x, float s) { return s * x - (s - 1.0f) * 0.5f; } inline __device__ float square(float x) { return x * x; } __global__ void CalDetection_NewCoords(const float *input, float *output, int batch_size, int yolo_width, int yolo_height, int num_anchors, const float *anchors, int num_classes, int input_w, int input_h, float scale_x_y) { int idx = threadIdx.x + blockDim.x * blockIdx.x; Detection* det = ((Detection*) output) + idx; int total_grids = yolo_width * yolo_height; if (idx >= batch_size * total_grids * num_anchors) return; int info_len = 5 + num_classes; //int batch_idx = idx / (total_grids * num_anchors); int group_idx = idx / total_grids; int anchor_idx = group_idx % num_anchors; const float* cur_input = input + group_idx * (info_len * total_grids) + (idx % total_grids); int class_id; float max_cls_prob = -CUDART_INF_F; // minus infinity for (int i = 5; i < info_len; ++i) { float l = *(cur_input + i * total_grids); if (l > max_cls_prob) { max_cls_prob = l; class_id = i - 5; } } float box_prob = *(cur_input + 4 * total_grids); //if (max_cls_prob < IGNORE_THRESH || box_prob < IGNORE_THRESH) // return; int row = (idx % total_grids) / yolo_width; int col = (idx % total_grids) % yolo_width; det->bbox[0] = (col + scale(*(cur_input + 0 * total_grids), scale_x_y)) / yolo_width; // [0, 1] det->bbox[1] = (row + scale(*(cur_input + 1 * total_grids), scale_x_y)) / yolo_height; // [0, 1] det->bbox[2] = square(*(cur_input + 2 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 0) / input_w; // [0, 1] det->bbox[3] = square(*(cur_input + 3 * total_grids)) * 4 * *(anchors + 2 * anchor_idx + 1) / input_h; // [0, 1] det->bbox[0] -= det->bbox[2] / 2; // shift from center to top-left det->bbox[1] -= det->bbox[3] / 2; 
det->det_confidence = box_prob; det->class_id = class_id; det->class_confidence = max_cls_prob; } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int batchSize) { int num_elements = batchSize * mNumAnchors * mYoloWidth * mYoloHeight; //CHECK(cudaMemset(output, 0, num_elements * sizeof(Detection))); if (mNewCoords) { CalDetection_NewCoords<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } else { CalDetection<<<(num_elements + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream>>> (inputs[0], output, batchSize, mYoloWidth, mYoloHeight, mNumAnchors, (const float*) mAnchors, mNumClasses, mInputWidth, mInputHeight, mScaleXY); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { assert(!strcmp(name, getPluginName())); const PluginField* fields = fc->fields; int yolo_width, yolo_height, num_anchors = 0; float anchors[MAX_ANCHORS * 2]; int num_classes, input_multiplier, new_coords = 0; float scale_x_y = 1.0; for (int i = 0; i < fc->nbFields; ++i) { const char* attrName = fields[i].name; if (!strcmp(attrName, "yoloWidth")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_width = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "yoloHeight")) { assert(fields[i].type == PluginFieldType::kINT32); yolo_height = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numAnchors")) { assert(fields[i].type == PluginFieldType::kINT32); num_anchors = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "numClasses")) { assert(fields[i].type == PluginFieldType::kINT32); num_classes = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "inputMultiplier")) { assert(fields[i].type == PluginFieldType::kINT32); input_multiplier = *(static_cast<const int*>(fields[i].data)); } else if (!strcmp(attrName, "anchors")){ assert(num_anchors > 0 && num_anchors <= MAX_ANCHORS); assert(fields[i].type == PluginFieldType::kFLOAT32); memcpy(anchors, static_cast<const float*>(fields[i].data), num_anchors * 2 * sizeof(float)); } else if (!strcmp(attrName, "scaleXY")) { assert(fields[i].type == PluginFieldType::kFLOAT32); scale_x_y = *(static_cast<const float*>(fields[i].data)); } else if (!strcmp(attrName, "newCoords")) { assert(fields[i].type == PluginFieldType::kINT32); new_coords = *(static_cast<const int*>(fields[i].data)); } else { std::cerr << "Unknown attribute: " << attrName << std::endl; assert(0); } } assert(yolo_width > 0 && yolo_height > 0); assert(anchors[0] > 0.0f && anchors[1] > 0.0f); assert(num_classes > 0); assert(input_multiplier == 8 || input_multiplier == 16 || input_multiplier == 32); assert(scale_x_y >= 1.0); YoloLayerPlugin* obj = new 
YoloLayerPlugin(yolo_width, yolo_height, num_anchors, anchors, num_classes, yolo_width * input_multiplier, yolo_height * input_multiplier, scale_x_y, new_coords); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(YoloPluginCreator); } // namespace nvinfer1
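The kernels above write into an array of Detection records, and getOutputDimensions() sizes the plugin output as mYoloWidth * mYoloHeight * mNumAnchors * sizeof(Detection) / sizeof(float) floats. The struct itself is declared in trt_yolo/yolo_layer.h, which is not reproduced in this file; the sketch below only infers a plausible all-float layout from the member accesses in the kernels and may differ from the real header.

// Hypothetical layout inferred from CalDetection's writes; not the actual header.
struct Detection {
    float bbox[4];          // x, y of the top-left corner, then w and h, all in [0, 1]
    float det_confidence;   // objectness score
    float class_id;         // index of the best class, stored as a float here
    float class_confidence; // score of the best class
};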
3a48944222503e2d75290269cc5731a62e8bd58d.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include <cstdlib> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #define SOFTENING 1e-9f /* * Each body contains x, y, and z coordinate positions, * as well as velocities in the x, y, and z directions. */ typedef struct { float x, y, z, vx, vy, vz; } Body; /* * Do not modify this function. A constraint of this exercise is * that it remain a host function. */ void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } /* * This function calculates the gravitational impact of all bodies in the system * on all others, but does not update their positions. */ void bodyForce(Body *p, float dt, int n) { for (int i = 0; i < n; ++i) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx * dx + dy * dy + dz * dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt * Fx; p[i].vy += dt * Fy; p[i].vz += dt * Fz; } } int main(const int argc, const char** argv) { /* * Do not change the value for `nBodies` here. If you would like to modify it, * pass values into the command line. */ int nBodies = 2 << 11; if (argc > 1) nBodies = 2 << atoi(argv[1]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies * sizeof(Body); float *buf; buf = (float *)malloc(bytes); Body *p = (Body*)buf; /* * As a constraint of this exercise, `randomizeBodies` must remain a host function. */ randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTimeInSeconds = 0.0; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ /*******************************************************************/ // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartGpuTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ bodyForce(p, dt, nBodies); // compute interbody forces /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. */ for (int i = 0; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } /*******************************************************************/ // Do not modify the code in this section. const double elapsedSeconds = GetGpuTimerInMiliseconds() / 1000.0; totalTimeInSeconds += elapsedSeconds; } double avgTime = totalTimeInSeconds / (double)(nIters); auto billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); /*******************************************************************/ /* * Feel free to modify code below. */ free(buf); }
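The exercise comments above suggest moving both the force computation and the position integration to the GPU. As one possible direction (not the exercise's reference solution), the integration loop in main() maps naturally onto a one-thread-per-body kernel; the sketch assumes the Body array has been made device-accessible, for example with hipMallocManaged, which the skeleton above does not yet do.

// Sketch only: the position-integration loop as a kernel, one thread per body.
__global__ void integratePositions(Body *p, float dt, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        p[i].x += p[i].vx * dt;
        p[i].y += p[i].vy * dt;
        p[i].z += p[i].vz * dt;
    }
}
// Possible launch: hipLaunchKernelGGL(integratePositions, dim3((nBodies + 255) / 256), dim3(256), 0, 0, p, dt, nBodies);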
3a48944222503e2d75290269cc5731a62e8bd58d.cu
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #include <cstdlib> #include <device_launch_parameters.h> #include <cuda_runtime.h> #define SOFTENING 1e-9f /* * Each body contains x, y, and z coordinate positions, * as well as velocities in the x, y, and z directions. */ typedef struct { float x, y, z, vx, vy, vz; } Body; /* * Do not modify this function. A constraint of this exercise is * that it remain a host function. */ void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } /* * This function calculates the gravitational impact of all bodies in the system * on all others, but does not update their positions. */ void bodyForce(Body *p, float dt, int n) { for (int i = 0; i < n; ++i) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx * dx + dy * dy + dz * dz + SOFTENING; float invDist = rsqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt * Fx; p[i].vy += dt * Fy; p[i].vz += dt * Fz; } } int main(const int argc, const char** argv) { /* * Do not change the value for `nBodies` here. If you would like to modify it, * pass values into the command line. */ int nBodies = 2 << 11; if (argc > 1) nBodies = 2 << atoi(argv[1]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies * sizeof(Body); float *buf; buf = (float *)malloc(bytes); Body *p = (Body*)buf; /* * As a constraint of this exercise, `randomizeBodies` must remain a host function. */ randomizeBodies(buf, 6 * nBodies); // Init pos / vel data double totalTimeInSeconds = 0.0; /* * This simulation will run for 10 cycles of time, calculating gravitational * interaction amongst bodies, and adjusting their positions to reflect. */ /*******************************************************************/ // Do not modify these 2 lines of code. for (int iter = 0; iter < nIters; iter++) { StartGpuTimer(); /*******************************************************************/ /* * You will likely wish to refactor the work being done in `bodyForce`, * as well as the work to integrate the positions. */ bodyForce(p, dt, nBodies); // compute interbody forces /* * This position integration cannot occur until this round of `bodyForce` has completed. * Also, the next round of `bodyForce` cannot begin until the integration is complete. */ for (int i = 0; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } /*******************************************************************/ // Do not modify the code in this section. const double elapsedSeconds = GetGpuTimerInMiliseconds() / 1000.0; totalTimeInSeconds += elapsedSeconds; } double avgTime = totalTimeInSeconds / (double)(nIters); auto billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime; printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, billionsOfOpsPerSecond); /*******************************************************************/ /* * Feel free to modify code below. */ free(buf); }
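For the CUDA version of the same exercise, the bodyForce() host loop above also maps onto a one-thread-per-body kernel. The sketch below mirrors that loop and is only one possible refactor, not the exercise's reference solution; it assumes the Body array lives in device-accessible memory (for example cudaMallocManaged) and that a synchronization separates it from the position integration, as the comments above require.

// Sketch only: bodyForce() as a kernel, one thread per body.
__global__ void bodyForceKernel(Body *p, float dt, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;

    float Fx = 0.0f, Fy = 0.0f, Fz = 0.0f;
    for (int j = 0; j < n; j++) {
        float dx = p[j].x - p[i].x;
        float dy = p[j].y - p[i].y;
        float dz = p[j].z - p[i].z;
        float distSqr = dx * dx + dy * dy + dz * dz + SOFTENING;
        float invDist = rsqrtf(distSqr);
        float invDist3 = invDist * invDist * invDist;
        Fx += dx * invDist3;
        Fy += dy * invDist3;
        Fz += dz * invDist3;
    }
    p[i].vx += dt * Fx;
    p[i].vy += dt * Fy;
    p[i].vz += dt * Fz;
}
// Possible launch: bodyForceKernel<<<(nBodies + 255) / 256, 256>>>(p, dt, nBodies);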
c475ed085e0d007014a6999bd9178293c358f2a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "lodepng.h" #include <time.h> __global__ void process(unsigned char * d_in, unsigned char * d_out, int d_width, int d_height){ //Get the thread ID accross all the blocks int tid = blockIdx.x*blockDim.x+threadIdx.x; //A thread will go through with its ID representing a pixel and go through each //RGBA value and edit as necessary unsigned char max_R = 0, max_G = 0, max_B = 0, max_A = 0; int i = 2*(tid) % d_height; int j = 2*(blockIdx.x) % d_width; max_R = d_in[4*d_width*i + 4*j] > max_R ? d_in[4*d_width*i + 4*j]:max_R; max_R = d_in[4*d_width*i + 4*(j+1)] > max_R ? d_in[4*d_width*i + 4*(j+1)]:max_R; max_R = d_in[4*d_width*(i+1) + 4*j] > max_R ? d_in[4*d_width*(i+1) + 4*j]:max_R; max_R = d_in[4*d_width*(i+1) + 4*(j+1)] > max_R ? d_in[4*d_width*(i+1) + 4*(j+1)]:max_R; d_out[d_width*i + 2*j] = max_R; max_G = d_in[4*d_width*i + 4*j + 1] > max_G ? d_in[4*d_width*i + 4*j + 1]:max_G; max_G = d_in[4*d_width*i + 4*(j+1) + 1] > max_G ? d_in[4*d_width*i + 4*(j+1) + 1]:max_G; max_G = d_in[4*d_width*(i+1) + 4*j + 1] > max_G ? d_in[4*d_width*(i+1) + 4*j + 1]:max_G; max_G = d_in[4*d_width*(i+1) + 4*(j+1) + 1] > max_G ? d_in[4*d_width*(i+1) + 4*(j+1) + 1]:max_G; d_out[d_width*i + 2*j + 1] = max_G; max_B = d_in[4*d_width*i + 4*j + 2] > max_B ? d_in[4*d_width*i + 4*j + 2]:max_B; max_B = d_in[4*d_width*i + 4*(j+1) + 2] > max_B ? d_in[4*d_width*i + 4*(j+1) + 2]:max_B; max_B = d_in[4*d_width*(i+1) + 4*j + 2] > max_B ? d_in[4*d_width*(i+1) + 4*j + 2]:max_B; max_B = d_in[4*d_width*(i+1) + 4*(j+1) + 2] > max_B ? d_in[4*d_width*(i+1) + 4*(j+1) + 2]:max_B; d_out[d_width*i + 2*j + 2] = max_B; max_A = d_in[4*d_width*i + 4*j + 3] > max_A ? d_in[4*d_width*i + 4*j + 3]:max_A; max_A = d_in[4*d_width*i + 4*(j+1) + 3] > max_A ? d_in[4*d_width*i + 4*(j+1) + 3]:max_A; max_A = d_in[4*d_width*(i+1) + 4*j + 3] > max_A ? d_in[4*d_width*(i+1) + 4*j + 3]:max_A; max_A = d_in[4*d_width*(i+1) + 4*(j+1) + 3] > max_A ? d_in[4*d_width*(i+1) + 4*(j+1) + 3]:max_A; d_out[d_width*i + 2*j + 3] = max_A; } int main(int argc, char *argv[]){ //Gets input and output filenames char * input_filename = argv[1]; char * output_filename = argv[2]; printf("%i\n%s\n%s\n",argc,input_filename,output_filename); //Data init. 
unsigned error; unsigned char *in, *out; unsigned char *d_in, *d_out; unsigned width, height; int size; //Load the input file and turn it into an unsigned char array called 'in' with 'width' and 'height' error = lodepng_decode32_file(&in, &width, &height, input_filename); if(error){ printf("error %u: %s\n", error, lodepng_error_text(error)); return 0; } //The size in numbers of values in the 'in' array size = width*height*4*sizeof(char); //'out' is the new unsigned char array for the edited values from 'in' out = (unsigned char*)malloc(size/4); //Malloc within the GPU //Give name of the data items we use and their sizes hipMalloc(&d_in, size); hipMalloc(&d_out, size/4); //Copy over 'in' to 'd_in' in the GPU //Can't just move 'in' to GPU evidently hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); int threadsPerBlock = height; int numBlocks = width; printf("Threads per block: %i\tNumber of blocks: %i\n",threadsPerBlock,numBlocks); //We start the kernel with the number of blocks we want with the number of threads per block we want //Pass the values we allocated in the GPU before so the GPU knows which data items it can use in this kernel clock_t tic = clock(); hipLaunchKernelGGL(( process), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_in, d_out, width, height); //Kernel launches are asynchronous; the blocking hipMemcpy below acts as the synchronization point //Copy the values from 'd_out' in GPU memory into 'out' in the CPU memory hipMemcpy(out, d_out, size/4, hipMemcpyDeviceToHost); //Free all that memory in the GPU hipFree(d_out); hipFree(d_in); clock_t toc = clock(); printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC); //send 'out' to be turned into a picture with output name lodepng_encode32_file(output_filename, out, width/2, height/2); //free all the things free(out); free(in); return 0; }
c475ed085e0d007014a6999bd9178293c358f2a0.cu
#include <stdio.h> #include <stdlib.h> #include "lodepng.h" #include <time.h> __global__ void process(unsigned char * d_in, unsigned char * d_out, int d_width, int d_height){ //Get the thread ID accross all the blocks int tid = blockIdx.x*blockDim.x+threadIdx.x; //A thread will go through with its ID representing a pixel and go through each //RGBA value and edit as necessary unsigned char max_R = 0, max_G = 0, max_B = 0, max_A = 0; int i = 2*(tid) % d_height; int j = 2*(blockIdx.x) % d_width; max_R = d_in[4*d_width*i + 4*j] > max_R ? d_in[4*d_width*i + 4*j]:max_R; max_R = d_in[4*d_width*i + 4*(j+1)] > max_R ? d_in[4*d_width*i + 4*(j+1)]:max_R; max_R = d_in[4*d_width*(i+1) + 4*j] > max_R ? d_in[4*d_width*(i+1) + 4*j]:max_R; max_R = d_in[4*d_width*(i+1) + 4*(j+1)] > max_R ? d_in[4*d_width*(i+1) + 4*(j+1)]:max_R; d_out[d_width*i + 2*j] = max_R; max_G = d_in[4*d_width*i + 4*j + 1] > max_G ? d_in[4*d_width*i + 4*j + 1]:max_G; max_G = d_in[4*d_width*i + 4*(j+1) + 1] > max_G ? d_in[4*d_width*i + 4*(j+1) + 1]:max_G; max_G = d_in[4*d_width*(i+1) + 4*j + 1] > max_G ? d_in[4*d_width*(i+1) + 4*j + 1]:max_G; max_G = d_in[4*d_width*(i+1) + 4*(j+1) + 1] > max_G ? d_in[4*d_width*(i+1) + 4*(j+1) + 1]:max_G; d_out[d_width*i + 2*j + 1] = max_G; max_B = d_in[4*d_width*i + 4*j + 2] > max_B ? d_in[4*d_width*i + 4*j + 2]:max_B; max_B = d_in[4*d_width*i + 4*(j+1) + 2] > max_B ? d_in[4*d_width*i + 4*(j+1) + 2]:max_B; max_B = d_in[4*d_width*(i+1) + 4*j + 2] > max_B ? d_in[4*d_width*(i+1) + 4*j + 2]:max_B; max_B = d_in[4*d_width*(i+1) + 4*(j+1) + 2] > max_B ? d_in[4*d_width*(i+1) + 4*(j+1) + 2]:max_B; d_out[d_width*i + 2*j + 2] = max_B; max_A = d_in[4*d_width*i + 4*j + 3] > max_A ? d_in[4*d_width*i + 4*j + 3]:max_A; max_A = d_in[4*d_width*i + 4*(j+1) + 3] > max_A ? d_in[4*d_width*i + 4*(j+1) + 3]:max_A; max_A = d_in[4*d_width*(i+1) + 4*j + 3] > max_A ? d_in[4*d_width*(i+1) + 4*j + 3]:max_A; max_A = d_in[4*d_width*(i+1) + 4*(j+1) + 3] > max_A ? d_in[4*d_width*(i+1) + 4*(j+1) + 3]:max_A; d_out[d_width*i + 2*j + 3] = max_A; } int main(int argc, char *argv[]){ //Gets input and output filenames char * input_filename = argv[1]; char * output_filename = argv[2]; printf("%i\n%s\n%s\n",argc,input_filename,output_filename); //Data init. 
unsigned error; unsigned char *in, *out; unsigned char *d_in, *d_out; unsigned width, height; int size; //Load the input file and turn it into an unsigned char array called 'in' with 'width' and 'height' error = lodepng_decode32_file(&in, &width, &height, input_filename); if(error){ printf("error %u: %s\n", error, lodepng_error_text(error)); return 0; } //The size in numbers of values in the 'in' array size = width*height*4*sizeof(char); //'out' is the new unsigned char array for the edited values from 'in' out = (unsigned char*)malloc(size/4); //Malloc within the GPU //Give name of the data items we use and their sizes cudaMalloc(&d_in, size); cudaMalloc(&d_out, size/4); //Copy over 'in' to 'd_in' in the GPU //Can't just move 'in' to GPU evidently cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); int threadsPerBlock = height; int numBlocks = width; printf("Threads per block: %i\tNumber of blocks: %i\n",threadsPerBlock,numBlocks); //We start the kernel with the number of blocks we want with the number of threads per block we want //Pass the values we allocated in the GPU before so the GPU knows which data items it can use in this kernel clock_t tic = clock(); process<<<numBlocks,threadsPerBlock>>>(d_in, d_out, width, height); //Kernel launches are asynchronous; the blocking cudaMemcpy below acts as the synchronization point //Copy the values from 'd_out' in GPU memory into 'out' in the CPU memory cudaMemcpy(out, d_out, size/4, cudaMemcpyDeviceToHost); //Free all that memory in the GPU cudaFree(d_out); cudaFree(d_in); clock_t toc = clock(); printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC); //send 'out' to be turned into a picture with output name lodepng_encode32_file(output_filename, out, width/2, height/2); //free all the things free(out); free(in); return 0; }
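The process kernel above computes a 2x2 max over each RGBA channel to build a half-resolution image, but its modulo-based thread mapping makes many threads recompute the same output pixel, and the launch uses the image height as the block size, which fails once the height exceeds the per-block thread limit. Below is a sketch of an alternative formulation with one thread per output pixel and an explicit bounds check; it assumes even image dimensions and is not a drop-in replacement for the code above.

// Sketch: 2x2 max pooling over RGBA, one thread per output pixel.
__global__ void maxpool2x2(const unsigned char *d_in, unsigned char *d_out,
                           int in_width, int in_height)
{
    int out_width  = in_width  / 2;
    int out_height = in_height / 2;
    int out_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (out_idx >= out_width * out_height) return;

    int oy = out_idx / out_width;   // output row
    int ox = out_idx % out_width;   // output column
    int iy = 2 * oy;                // top-left of the 2x2 input block
    int ix = 2 * ox;

    for (int c = 0; c < 4; ++c) {   // R, G, B, A
        unsigned char m = 0;
        for (int dy = 0; dy < 2; ++dy)
            for (int dx = 0; dx < 2; ++dx) {
                unsigned char v = d_in[4 * (in_width * (iy + dy) + (ix + dx)) + c];
                if (v > m) m = v;
            }
        d_out[4 * (out_width * oy + ox) + c] = m;
    }
}
// Possible launch: maxpool2x2<<<((width / 2) * (height / 2) + 255) / 256, 256>>>(d_in, d_out, width, height);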
299ed51528ad7247ee733a10fa5161206b716d1a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <sstream> #include "dataStructures/helper/matrix_helper.h" #include "dataStructures/matrix_element.hpp" #include "dataStructures/sparse_matrix.hpp" #include "hd_data.hpp" #include "helper/cuda/cuda_error_check.h" #include "helper/cuda/cuda_reduction_operation.hpp" #include "helper/cuda/cuda_thread_manager.hpp" #include "helper/cuda/cusolverSP_error_check.h" #include "helper/cuda/cusparse_error_check.h" #include "matrixOperations/basic_operations.hpp" #include "matrixOperations/row_ordering.hpp" __host__ d_spmatrix::d_spmatrix() : d_spmatrix(0, 0){}; __host__ d_spmatrix::d_spmatrix(int rows, int cols, int nnz, matrix_type type, bool is_device) : nnz(nnz), rows(rows), cols(cols), is_device(is_device), type(type), loaded_elements(nnz) { mem_alloc(); } __host__ d_spmatrix::d_spmatrix(const d_spmatrix &m, bool copyToOtherMem) : d_spmatrix(m.rows, m.cols, m.nnz, m.type, m.is_device ^ copyToOtherMem) { loaded_elements = m.loaded_elements; assert(m.loaded_elements == m.nnz); hipMemcpyKind memCpy = (m.is_device) ? (is_device) ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost : (is_device) ? hipMemcpyHostToDevice : hipMemcpyHostToHost; gpuErrchk(hipMemcpy(data, m.data, sizeof(T) * nnz, memCpy)); gpuErrchk(hipMemcpy(colPtr, m.colPtr, sizeof(int) * ((type == CSC) ? cols + 1 : nnz), memCpy)); gpuErrchk(hipMemcpy(rowPtr, m.rowPtr, sizeof(int) * ((type == CSR) ? rows + 1 : nnz), memCpy)); } __host__ void d_spmatrix::operator=(const d_spmatrix &other) { assert(is_device == other.is_device); mem_free(); nnz = other.nnz; rows = other.rows; cols = other.cols; loaded_elements = other.loaded_elements; type = other.type; mem_alloc(); hipMemcpyKind memCpy = (is_device) ? hipMemcpyDeviceToDevice : hipMemcpyHostToHost; gpuErrchk(hipMemcpy(data, other.data, sizeof(T) * nnz, memCpy)); gpuErrchk(hipMemcpy(colPtr, other.colPtr, sizeof(int) * ((type == CSC) ? cols + 1 : nnz), memCpy)); gpuErrchk(hipMemcpy(rowPtr, other.rowPtr, sizeof(int) * ((type == CSR) ? rows + 1 : nnz), memCpy)); } __host__ bool d_spmatrix::operator==(const d_spmatrix &other) { if (is_device) { hd_data<bool> result(true); hipLaunchKernelGGL(( is_equalK), dim3(1), dim3(1), 0, 0, *(this->_device), *(other._device), result(true)); result.update_host(); gpuErrchk(hipDeviceSynchronize()); return result(); } else return is_equalBody(*this, other); } __host__ void d_spmatrix::mem_alloc() { if (nnz == 0) return; int rowPtrSize = (type == CSR) ? rows + 1 : nnz; int colPtrSize = (type == CSC) ?
cols + 1 : nnz; if (is_device) { gpuErrchk(hipMalloc(&data, nnz * sizeof(T))); gpuErrchk(hipMalloc(&rowPtr, rowPtrSize * sizeof(int))); gpuErrchk(hipMalloc(&colPtr, colPtrSize * sizeof(int))); gpuErrchk(hipMalloc(&_device, sizeof(d_spmatrix))); gpuErrchk(hipMemcpy(_device, this, sizeof(d_spmatrix), hipMemcpyHostToDevice)); } else { data = new T[nnz]; rowPtr = new int[rowPtrSize]; for (int i = 0; i < rowPtrSize; i++) rowPtr[i] = 0; colPtr = new int[colPtrSize]; for (int i = 0; i < colPtrSize; i++) colPtr[i] = 0; } } __host__ void d_spmatrix::mem_free() { if (nnz > 0) if (is_device) { gpuErrchk(hipFree(data)); gpuErrchk(hipFree(rowPtr)); gpuErrchk(hipFree(colPtr)); gpuErrchk(hipFree(_device)); } else { delete[] data; delete[] rowPtr; delete[] colPtr; } } __host__ std::string d_spmatrix::to_string() { int printCount = 5; std::stringstream strs; char buffer[50]; sprintf(buffer, "Matrix :\n%i %i %i/%i isDev=%i format=", rows, cols, loaded_elements, nnz, is_device); strs << std::string(buffer); switch (type) { case COO: strs << "COO\n"; break; case CSR: strs << "CSR\n"; break; case CSC: strs << "CSC\n"; break; } for (matrix_elm elm(this); elm.has_next(); elm.next()) { strs << elm.to_string(); printCount--; if (printCount <= 0) { if (elm.has_next()) strs << "...\n"; break; } } return strs.str(); } __host__ __device__ void d_spmatrix::print(int printCount) const { #ifndef __CUDA_ARCH__ if (is_device) { hipLaunchKernelGGL(( print_matrixK), dim3(1), dim3(1), 0, 0, _device, printCount); gpuErrchk(hipDeviceSynchronize()); } else #endif print_matrixBody(this, printCount); } __host__ void d_spmatrix::set_nnz(int nnz) { mem_free(); this->nnz = nnz; this->loaded_elements = nnz; mem_alloc(); } __host__ void d_spmatrix::start_filling() { loaded_elements = 0; if (is_device) { gpuErrchk(hipFree(_device)); gpuErrchk(hipMalloc(&_device, sizeof(d_spmatrix))); gpuErrchk(hipMemcpy(_device, this, sizeof(d_spmatrix), hipMemcpyHostToDevice)); loaded_elements = nnz; } } __host__ __device__ void d_spmatrix::add_element(int i, int j, T val) { #ifndef __CUDA_ARCH__ if (is_device) { hipLaunchKernelGGL(( add_elementK), dim3(1), dim3(1), 0, 0, _device, i, j, val); gpuErrchk(hipDeviceSynchronize()); } else #endif add_elementBody(this, i, j, val); } // Get the value at index k of the sparse matrix __host__ __device__ const T &d_spmatrix::get(int k) const { return data[k]; } __host__ __device__ const T &d_spmatrix::get_line(int i) const { if (type != CSR) { printf("Error! Doesn't work with other type than CSR"); } return data[rowPtr[i]]; } __host__ __device__ T d_spmatrix::lookup(int i, int j) const { for (matrix_elm elm(this); elm.has_next(); elm.next()) if (elm.i == i && elm.j == j) return *elm.val; return 0; } __host__ void d_spmatrix::to_compress_dtype(matrix_type toType) { if (toType == COO) { if (is_convertible_to(CSR)) toType = CSR; else if (is_convertible_to(CSC)) toType = CSC; else { printf("Not convertible to any type!\n"); return; } } else { assert(is_convertible_to(toType)); } int newSize = (toType == CSR) ? rows + 1 : cols + 1; int *newArray; if (is_device) { gpuErrchk(hipMalloc(&newArray, newSize * sizeof(int))); hipLaunchKernelGGL(( convert_arrayK), dim3(1), dim3(1), 0, 0, _device, (toType == CSR) ? rowPtr : colPtr, newArray, newSize); hipFree((toType == CSR) ? rowPtr : colPtr); } else { newArray = new int[newSize]; convert_arrayBody(this, (toType == CSR) ? 
rowPtr : colPtr, newArray, newSize); if (toType == CSR) delete[] rowPtr; else delete[] colPtr; } if (toType == CSR) rowPtr = newArray; else colPtr = newArray; type = toType; if (is_device) { loaded_elements = nnz; // Warning!! There is no assert to protect this! gpuErrchk(hipMemcpy(_device, this, sizeof(d_spmatrix), hipMemcpyHostToDevice)); gpuErrchk(hipDeviceSynchronize()); } } __host__ bool d_spmatrix::is_convertible_to(matrix_type toType) const { assert(toType != type); if (toType == COO) return true; if (type != COO) return false; int *analyzedArray = (toType == CSR) ? rowPtr : colPtr; bool isOK = true; if (is_device) { bool *_isOK; gpuErrchk(hipMalloc(&_isOK, sizeof(bool))); hipLaunchKernelGGL(( checkOrdered), dim3(1), dim3(1), 0, 0, analyzedArray, nnz, _isOK); gpuErrchk( hipMemcpy(&isOK, _isOK, sizeof(bool), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(_isOK)); gpuErrchk(hipDeviceSynchronize()); } else { check_orderedBody(analyzedArray, nnz, &isOK); } return isOK; } __host__ void d_spmatrix::to_csr() { if (type == CSR) throw("Error! Already CSR type \n"); if (type == CSC) throw("Error! Already CSC type \n"); if (!is_convertible_to(CSR)) { RowOrdering(*this); } assert(is_convertible_to(CSR)); to_compress_dtype(CSR); assert(type == CSR); } __host__ hipsparseMatDescr_t d_spmatrix::make_descriptor() { hipsparseMatDescr_t descr; cusparseErrchk(hipsparseCreateMatDescr(&descr)); hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO); return descr; } __host__ hipsparseSpMatDescr_t d_spmatrix::make_sp_descriptor() { hipsparseSpMatDescr_t descr; cusparseErrchk(hipsparseCreateCsr( &descr, rows, cols, nnz, rowPtr, colPtr, data, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO, T_Cuda)); return std::move(descr); } __host__ bool d_spmatrix::is_symetric() { bool *_return = new bool; if (is_device) { bool *_returnGpu; gpuErrchk(hipMalloc(&_returnGpu, sizeof(bool))); hipLaunchKernelGGL(( is_symetricK), dim3(1), dim3(1), 0, 0, _device, _returnGpu); gpuErrchk(hipDeviceSynchronize()); gpuErrchk(hipMemcpy(_return, _returnGpu, sizeof(bool), hipMemcpyDeviceToHost)); gpuErrchk(hipFree(_returnGpu)); gpuErrchk(hipDeviceSynchronize()); } else { is_symetricBody(this, _return); } return *_return; } typedef hipsparseStatus_t (*FuncSpar)(...); __host__ void d_spmatrix::operation_cusparse(void *function, hipsparseHandle_t &handle, bool addValues, void *pointer1, void *pointer2) { if (addValues) { printf("This function is not complete\n"); } else { if (pointer1) if (pointer2) { cusparseErrchk(((FuncSpar)function)(handle, rows, cols, nnz, rowPtr, colPtr, pointer1, pointer2)); } else { cusparseErrchk(((FuncSpar)function)(handle, rows, cols, nnz, rowPtr, colPtr, pointer1)); } else printf("This function is not complete\n"); } } typedef cusolverStatus_t (*FuncSolv)(...); __host__ void d_spmatrix::operation_cusolver(void *function, cusolverSpHandle_t &handle, hipsparseMatDescr_t descr, T *b, T *xOut, int *singularOut) { cusolverErrchk(((FuncSolv)function)(handle, rows, nnz, descr, data, rowPtr, colPtr, b, 0.0, 0, xOut, singularOut)); // TODO : SymOptimization } __host__ void d_spmatrix::make_datawidth() { if (dataWidth >= 0) printf("Warning! 
Data width has already been computed.\n"); dim3Pair threadblock = make1DThreadBlock(rows); d_vector width(rows); hipLaunchKernelGGL(( get_datawidthK), dim3(threadblock.block), dim3(threadblock.thread), 0, 0, *_device, *(d_vector *)width._device); ReductionOperation(width, maximum); T dataWidthFloat; hipMemcpy(&dataWidthFloat, width.data, sizeof(T), hipMemcpyDeviceToHost); dataWidth = (int)dataWidthFloat; } __host__ d_spmatrix::~d_spmatrix() { mem_free(); }
299ed51528ad7247ee733a10fa5161206b716d1a.cu
#include <assert.h> #include <sstream> #include "dataStructures/helper/matrix_helper.h" #include "dataStructures/matrix_element.hpp" #include "dataStructures/sparse_matrix.hpp" #include "hd_data.hpp" #include "helper/cuda/cuda_error_check.h" #include "helper/cuda/cuda_reduction_operation.hpp" #include "helper/cuda/cuda_thread_manager.hpp" #include "helper/cuda/cusolverSP_error_check.h" #include "helper/cuda/cusparse_error_check.h" #include "matrixOperations/basic_operations.hpp" #include "matrixOperations/row_ordering.hpp" __host__ d_spmatrix::d_spmatrix() : d_spmatrix(0, 0){}; __host__ d_spmatrix::d_spmatrix(int rows, int cols, int nnz, matrix_type type, bool is_device) : nnz(nnz), rows(rows), cols(cols), is_device(is_device), type(type), loaded_elements(nnz) { mem_alloc(); } __host__ d_spmatrix::d_spmatrix(const d_spmatrix &m, bool copyToOtherMem) : d_spmatrix(m.rows, m.cols, m.nnz, m.type, m.is_device ^ copyToOtherMem) { loaded_elements = m.loaded_elements; assert(m.loaded_elements == m.nnz); cudaMemcpyKind memCpy = (m.is_device) ? (is_device) ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost : (is_device) ? cudaMemcpyHostToDevice : cudaMemcpyHostToHost; gpuErrchk(cudaMemcpy(data, m.data, sizeof(T) * nnz, memCpy)); gpuErrchk(cudaMemcpy(colPtr, m.colPtr, sizeof(int) * ((type == CSC) ? cols + 1 : nnz), memCpy)); gpuErrchk(cudaMemcpy(rowPtr, m.rowPtr, sizeof(int) * ((type == CSR) ? rows + 1 : nnz), memCpy)); } __host__ void d_spmatrix::operator=(const d_spmatrix &other) { assert(is_device == other.is_device); mem_free(); nnz = other.nnz; rows = other.rows; cols = other.cols; loaded_elements = other.loaded_elements; type = other.type; mem_alloc(); cudaMemcpyKind memCpy = (is_device) ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToHost; gpuErrchk(cudaMemcpy(data, other.data, sizeof(T) * nnz, memCpy)); gpuErrchk(cudaMemcpy(colPtr, other.colPtr, sizeof(int) * ((type == CSC) ? cols + 1 : nnz), memCpy)); gpuErrchk(cudaMemcpy(rowPtr, other.rowPtr, sizeof(int) * ((type == CSR) ? rows + 1 : nnz), memCpy)); } __host__ bool d_spmatrix::operator==(const d_spmatrix &other) { if (is_device) { hd_data<bool> result(true); is_equalK<<<1, 1>>>(*(this->_device), *(other._device), result(true)); result.update_host(); gpuErrchk(cudaDeviceSynchronize()); return result(); } else return is_equalBody(*this, other); } __host__ void d_spmatrix::mem_alloc() { if (nnz == 0) return; int rowPtrSize = (type == CSR) ? rows + 1 : nnz; int colPtrSize = (type == CSC) ?
cols + 1 : nnz; if (is_device) { gpuErrchk(cudaMalloc(&data, nnz * sizeof(T))); gpuErrchk(cudaMalloc(&rowPtr, rowPtrSize * sizeof(int))); gpuErrchk(cudaMalloc(&colPtr, colPtrSize * sizeof(int))); gpuErrchk(cudaMalloc(&_device, sizeof(d_spmatrix))); gpuErrchk(cudaMemcpy(_device, this, sizeof(d_spmatrix), cudaMemcpyHostToDevice)); } else { data = new T[nnz]; rowPtr = new int[rowPtrSize]; for (int i = 0; i < rowPtrSize; i++) rowPtr[i] = 0; colPtr = new int[colPtrSize]; for (int i = 0; i < colPtrSize; i++) colPtr[i] = 0; } } __host__ void d_spmatrix::mem_free() { if (nnz > 0) if (is_device) { gpuErrchk(cudaFree(data)); gpuErrchk(cudaFree(rowPtr)); gpuErrchk(cudaFree(colPtr)); gpuErrchk(cudaFree(_device)); } else { delete[] data; delete[] rowPtr; delete[] colPtr; } } __host__ std::string d_spmatrix::to_string() { int printCount = 5; std::stringstream strs; char buffer[50]; sprintf(buffer, "Matrix :\n%i %i %i/%i isDev=%i format=", rows, cols, loaded_elements, nnz, is_device); strs << std::string(buffer); switch (type) { case COO: strs << "COO\n"; break; case CSR: strs << "CSR\n"; break; case CSC: strs << "CSC\n"; break; } for (matrix_elm elm(this); elm.has_next(); elm.next()) { strs << elm.to_string(); printCount--; if (printCount <= 0) { if (elm.has_next()) strs << "...\n"; break; } } return strs.str(); } __host__ __device__ void d_spmatrix::print(int printCount) const { #ifndef __CUDA_ARCH__ if (is_device) { print_matrixK<<<1, 1>>>(_device, printCount); gpuErrchk(cudaDeviceSynchronize()); } else #endif print_matrixBody(this, printCount); } __host__ void d_spmatrix::set_nnz(int nnz) { mem_free(); this->nnz = nnz; this->loaded_elements = nnz; mem_alloc(); } __host__ void d_spmatrix::start_filling() { loaded_elements = 0; if (is_device) { gpuErrchk(cudaFree(_device)); gpuErrchk(cudaMalloc(&_device, sizeof(d_spmatrix))); gpuErrchk(cudaMemcpy(_device, this, sizeof(d_spmatrix), cudaMemcpyHostToDevice)); loaded_elements = nnz; } } __host__ __device__ void d_spmatrix::add_element(int i, int j, T val) { #ifndef __CUDA_ARCH__ if (is_device) { add_elementK<<<1, 1>>>(_device, i, j, val); gpuErrchk(cudaDeviceSynchronize()); } else #endif add_elementBody(this, i, j, val); } // Get the value at index k of the sparse matrix __host__ __device__ const T &d_spmatrix::get(int k) const { return data[k]; } __host__ __device__ const T &d_spmatrix::get_line(int i) const { if (type != CSR) { printf("Error! Doesn't work with other type than CSR"); } return data[rowPtr[i]]; } __host__ __device__ T d_spmatrix::lookup(int i, int j) const { for (matrix_elm elm(this); elm.has_next(); elm.next()) if (elm.i == i && elm.j == j) return *elm.val; return 0; } __host__ void d_spmatrix::to_compress_dtype(matrix_type toType) { if (toType == COO) { if (is_convertible_to(CSR)) toType = CSR; else if (is_convertible_to(CSC)) toType = CSC; else { printf("Not convertible to any type!\n"); return; } } else { assert(is_convertible_to(toType)); } int newSize = (toType == CSR) ? rows + 1 : cols + 1; int *newArray; if (is_device) { gpuErrchk(cudaMalloc(&newArray, newSize * sizeof(int))); convert_arrayK<<<1, 1>>>(_device, (toType == CSR) ? rowPtr : colPtr, newArray, newSize); cudaFree((toType == CSR) ? rowPtr : colPtr); } else { newArray = new int[newSize]; convert_arrayBody(this, (toType == CSR) ? rowPtr : colPtr, newArray, newSize); if (toType == CSR) delete[] rowPtr; else delete[] colPtr; } if (toType == CSR) rowPtr = newArray; else colPtr = newArray; type = toType; if (is_device) { loaded_elements = nnz; // Warning!! 
There is no assert to protect this! gpuErrchk(cudaMemcpy(_device, this, sizeof(d_spmatrix), cudaMemcpyHostToDevice)); gpuErrchk(cudaDeviceSynchronize()); } } __host__ bool d_spmatrix::is_convertible_to(matrix_type toType) const { assert(toType != type); if (toType == COO) return true; if (type != COO) return false; int *analyzedArray = (toType == CSR) ? rowPtr : colPtr; bool isOK = true; if (is_device) { bool *_isOK; gpuErrchk(cudaMalloc(&_isOK, sizeof(bool))); checkOrdered<<<1, 1>>>(analyzedArray, nnz, _isOK); gpuErrchk( cudaMemcpy(&isOK, _isOK, sizeof(bool), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(_isOK)); gpuErrchk(cudaDeviceSynchronize()); } else { check_orderedBody(analyzedArray, nnz, &isOK); } return isOK; } __host__ void d_spmatrix::to_csr() { if (type == CSR) throw("Error! Already CSR type \n"); if (type == CSC) throw("Error! Already CSC type \n"); if (!is_convertible_to(CSR)) { RowOrdering(*this); } assert(is_convertible_to(CSR)); to_compress_dtype(CSR); assert(type == CSR); } __host__ cusparseMatDescr_t d_spmatrix::make_descriptor() { cusparseMatDescr_t descr; cusparseErrchk(cusparseCreateMatDescr(&descr)); cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO); return descr; } __host__ cusparseSpMatDescr_t d_spmatrix::make_sp_descriptor() { cusparseSpMatDescr_t descr; cusparseErrchk(cusparseCreateCsr( &descr, rows, cols, nnz, rowPtr, colPtr, data, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO, T_Cuda)); return std::move(descr); } __host__ bool d_spmatrix::is_symetric() { bool *_return = new bool; if (is_device) { bool *_returnGpu; gpuErrchk(cudaMalloc(&_returnGpu, sizeof(bool))); is_symetricK<<<1, 1>>>(_device, _returnGpu); gpuErrchk(cudaDeviceSynchronize()); gpuErrchk(cudaMemcpy(_return, _returnGpu, sizeof(bool), cudaMemcpyDeviceToHost)); gpuErrchk(cudaFree(_returnGpu)); gpuErrchk(cudaDeviceSynchronize()); } else { is_symetricBody(this, _return); } return *_return; } typedef cusparseStatus_t (*FuncSpar)(...); __host__ void d_spmatrix::operation_cusparse(void *function, cusparseHandle_t &handle, bool addValues, void *pointer1, void *pointer2) { if (addValues) { printf("This function is not complete\n"); } else { if (pointer1) if (pointer2) { cusparseErrchk(((FuncSpar)function)(handle, rows, cols, nnz, rowPtr, colPtr, pointer1, pointer2)); } else { cusparseErrchk(((FuncSpar)function)(handle, rows, cols, nnz, rowPtr, colPtr, pointer1)); } else printf("This function is not complete\n"); } } typedef cusolverStatus_t (*FuncSolv)(...); __host__ void d_spmatrix::operation_cusolver(void *function, cusolverSpHandle_t &handle, cusparseMatDescr_t descr, T *b, T *xOut, int *singularOut) { cusolverErrchk(((FuncSolv)function)(handle, rows, nnz, descr, data, rowPtr, colPtr, b, 0.0, 0, xOut, singularOut)); // TODO : SymOptimization } __host__ void d_spmatrix::make_datawidth() { if (dataWidth >= 0) printf("Warning! Data width has already been computed.\n"); dim3Pair threadblock = make1DThreadBlock(rows); d_vector width(rows); get_datawidthK<<<threadblock.block, threadblock.thread>>>( *_device, *(d_vector *)width._device); ReductionOperation(width, maximum); T dataWidthFloat; cudaMemcpy(&dataWidthFloat, width.data, sizeof(T), cudaMemcpyDeviceToHost); dataWidth = (int)dataWidthFloat; } __host__ d_spmatrix::~d_spmatrix() { mem_free(); }
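to_compress_dtype() above replaces the per-element row (or column) index array with a prefix array of rows + 1 (or cols + 1) offsets, which is why the matrix must be row-ordered first and why is_convertible_to() checks that the index array is ordered. A small worked example of that CSR convention, independent of this codebase:

// Worked example of the CSR layout produced by to_csr()/to_compress_dtype().
// Take the 3x4 matrix (dots are structural zeros):
//     [ 5 . 2 . ]
//     [ . . 3 . ]
//     [ 1 . . 4 ]
// COO stores one (row, col, value) triple per nonzero, ordered by row:
//     row  = {0, 0, 1, 2, 2}
//     col  = {0, 2, 2, 0, 3}
//     data = {5, 2, 3, 1, 4}
// CSR keeps col and data unchanged but compresses row into rows + 1 offsets,
// where rowPtr[i] .. rowPtr[i+1] delimits the nonzeros of row i:
//     rowPtr = {0, 2, 3, 5}
// cusparseCreateCsr() in make_sp_descriptor() consumes exactly these three arrays.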
f79be48292bd6c56b3f075c99a0b605746249a6a.hip
// !!! This is a file automatically generated by hipify!!! #include "init_hip.cuh" void init_params() { for(int i=0;i<CONV_W_NUM;i++) { for(int j=0;j<CONV_W_SIZE;j++) for(int k=0;k<CONV_W_SIZE;k++) conv_w[i][j][k]=get_rand(CONV_W_SIZE*CONV_W_SIZE); conv_b[i]=get_rand(CONV_W_SIZE*CONV_W_SIZE); } for(int i=0;i<FC1_SIZE;i++) { for(int j=0;j<CONV_W_NUM;j++) for(int k=0;k<POOL_SIZE;k++) for(int l=0;l<POOL_SIZE;l++) fc1_w[i][j][k][l]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM); fc1_b[i]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM); } for(int i=0;i<FC2_SIZE;i++) { for(int j=0;j<FC1_SIZE;j++) fc2_w[i][j]=get_rand(FC1_SIZE); fc2_b[i]=get_rand(FC1_SIZE); } }
f79be48292bd6c56b3f075c99a0b605746249a6a.cu
#include "init.cuh" void init_params() { for(int i=0;i<CONV_W_NUM;i++) { for(int j=0;j<CONV_W_SIZE;j++) for(int k=0;k<CONV_W_SIZE;k++) conv_w[i][j][k]=get_rand(CONV_W_SIZE*CONV_W_SIZE); conv_b[i]=get_rand(CONV_W_SIZE*CONV_W_SIZE); } for(int i=0;i<FC1_SIZE;i++) { for(int j=0;j<CONV_W_NUM;j++) for(int k=0;k<POOL_SIZE;k++) for(int l=0;l<POOL_SIZE;l++) fc1_w[i][j][k][l]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM); fc1_b[i]=get_rand(POOL_SIZE*POOL_SIZE*CONV_W_NUM); } for(int i=0;i<FC2_SIZE;i++) { for(int j=0;j<FC1_SIZE;j++) fc2_w[i][j]=get_rand(FC1_SIZE); fc2_b[i]=get_rand(FC1_SIZE); } }
86a5deb3af4341c584c4dc8846e7c06108d0693c.hip
// !!! This is a file automatically generated by hipify!!! #include "../shared/nelderMead.hpp" #include "../shared/util.hpp" #include "../shared/reading.hpp" #include "../shared/printing.hpp" #include "../shared/abOffLattice.hpp" #include "nelderMead.cuh" void run(int &executions, int &proteins_evalued, std::vector<NelderMead> &parameters, std::vector<ABOffLattice*> &parametersAB, int d = 0){ std::ofstream output_plot_file; std::string path; float elapsed_time; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); printParameters(executions, proteins_evalued, parameters[0].evaluations_number); for(int k = 0; k < proteins_evalued; k++){ // if(d >= parameters[k].dimension){ // continue; // } ABOffLattice * d_parametersAB; hipMalloc(&d_parametersAB, sizeof(ABOffLattice)); hipMemcpy(d_parametersAB, parametersAB[k], sizeof(ABOffLattice), hipMemcpyHostToDevice); char aa_sequence[150]; memset(aa_sequence, 0, sizeof(char) * 150); strcpy(aa_sequence, (*parametersAB[k]).aminoacid_sequence); hipMemcpyToSymbol(aminoacid_sequence, (void *) aa_sequence, 150 * sizeof(char)); printProteinParameters(parameters[k], parametersAB[k]); std::vector<NelderMeadResult> results(executions); for(int i = 0; i < executions; i++){ printf("Running execution %d...\n", i + 1); path = "resources/outputs/plot_" + std::to_string(k) + "_" + (*parametersAB[k]).protein_name + "_" + std::to_string(i) + ".txt"; // output_plot_file.open(path.c_str(), std::ofstream::out); hipEventRecord(start); results[i] = nelderMead(parameters[k], output_plot_file, (void*) parametersAB[k], (void*) d_parametersAB ); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); results[i].elapsed_time = elapsed_time / 1000.0f; // output_plot_file.close(); } printResults(results); } } int main() { OptimizationTypeEnum optimization_type; int executions, evaluations, proteins_evalued, p; std::ifstream input_file("resources/inputs/input.txt"); readInput(input_file, optimization_type, executions, evaluations, proteins_evalued, p); std::vector<NelderMead> parameters(proteins_evalued); std::vector<ABOffLattice*> parametersAB(proteins_evalued); readInputProteins(input_file, evaluations, p, optimization_type, parameters, parametersAB); // for(int i = 1; i <= 64; i *= 2){ // printf("-*-*-*-*-*-*-*-*-*-*-*-*-*- P == %d --*-*-*-*-*-*-*-*-*-*-*-*-*\n", i); // for(int j = 0; j < proteins_evalued; j++){ // parameters[j].p = i; // } run(executions, proteins_evalued, parameters, parametersAB); // } }
86a5deb3af4341c584c4dc8846e7c06108d0693c.cu
#include "../shared/nelderMead.hpp" #include "../shared/util.hpp" #include "../shared/reading.hpp" #include "../shared/printing.hpp" #include "../shared/abOffLattice.hpp" #include "nelderMead.cuh" void run(int &executions, int &proteins_evalued, std::vector<NelderMead> &parameters, std::vector<ABOffLattice*> &parametersAB, int d = 0){ std::ofstream output_plot_file; std::string path; float elapsed_time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); printParameters(executions, proteins_evalued, parameters[0].evaluations_number); for(int k = 0; k < proteins_evalued; k++){ // if(d >= parameters[k].dimension){ // continue; // } ABOffLattice * d_parametersAB; cudaMalloc(&d_parametersAB, sizeof(ABOffLattice)); cudaMemcpy(d_parametersAB, parametersAB[k], sizeof(ABOffLattice), cudaMemcpyHostToDevice); char aa_sequence[150]; memset(aa_sequence, 0, sizeof(char) * 150); strcpy(aa_sequence, (*parametersAB[k]).aminoacid_sequence); cudaMemcpyToSymbol(aminoacid_sequence, (void *) aa_sequence, 150 * sizeof(char)); printProteinParameters(parameters[k], parametersAB[k]); std::vector<NelderMeadResult> results(executions); for(int i = 0; i < executions; i++){ printf("Running execution %d...\n", i + 1); path = "resources/outputs/plot_" + std::to_string(k) + "_" + (*parametersAB[k]).protein_name + "_" + std::to_string(i) + ".txt"; // output_plot_file.open(path.c_str(), std::ofstream::out); cudaEventRecord(start); results[i] = nelderMead(parameters[k], output_plot_file, (void*) parametersAB[k], (void*) d_parametersAB ); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); results[i].elapsed_time = elapsed_time / 1000.0f; // output_plot_file.close(); } printResults(results); } } int main() { OptimizationTypeEnum optimization_type; int executions, evaluations, proteins_evalued, p; std::ifstream input_file("resources/inputs/input.txt"); readInput(input_file, optimization_type, executions, evaluations, proteins_evalued, p); std::vector<NelderMead> parameters(proteins_evalued); std::vector<ABOffLattice*> parametersAB(proteins_evalued); readInputProteins(input_file, evaluations, p, optimization_type, parameters, parametersAB); // for(int i = 1; i <= 64; i *= 2){ // printf("-*-*-*-*-*-*-*-*-*-*-*-*-*- P == %d --*-*-*-*-*-*-*-*-*-*-*-*-*\n", i); // for(int j = 0; j < proteins_evalued; j++){ // parameters[j].p = i; // } run(executions, proteins_evalued, parameters, parametersAB); // } }
9e68f0e739992543643f70c5a12feecb5e2b272b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SnappyBlockUtils.cuh" #include "SnappyKernels.h" namespace nvcomp { #define HASH_BITS 12 // TBD: Tentatively limits to 2-byte codes to prevent long copy search followed by long literal // encoding #define MAX_LITERAL_LENGTH 256 #define MAX_COPY_LENGTH 64 // Syntax limit #define MAX_COPY_DISTANCE 32768 // Matches encoder limit as described in snappy format description /** * @brief snappy compressor state **/ struct snap_state_s { const uint8_t *src; ///< Ptr to uncompressed data uint32_t src_len; ///< Uncompressed data length uint8_t *dst_base; ///< Base ptr to output compressed data uint8_t *dst; ///< Current ptr to uncompressed data uint8_t *end; ///< End of uncompressed data buffer volatile uint32_t literal_length; ///< Number of literal bytes volatile uint32_t copy_length; ///< Number of copy bytes volatile uint32_t copy_distance; ///< Distance for copy bytes uint16_t hash_map[1 << HASH_BITS]; ///< Low 16-bit offset from hash }; static inline __device__ uint32_t get_max_compressed_length(uint32_t source_bytes) { // This is an estimate from the original snappy library return 32 + source_bytes + source_bytes / 6; } /** * @brief 12-bit hash from four consecutive bytes **/ static inline __device__ uint32_t snap_hash(uint32_t v) { return (v * ((1 << 20) + (0x2a00) + (0x6a) + 1)) >> (32 - HASH_BITS); } /** * @brief Outputs a snappy literal symbol * * @param dst Destination compressed byte stream * @param end End of compressed data buffer * @param src Pointer to literal bytes * @param len_minus1 Number of literal bytes minus 1 * @param t Thread in warp * * @return Updated pointer to compressed byte stream **/ static __device__ uint8_t *StoreLiterals( uint8_t *dst, uint8_t *end, const uint8_t *src, uint32_t len_minus1, uint32_t t) { if (len_minus1 < 60) { if (!t && dst < end) dst[0] = (len_minus1 << 2); dst += 1; } else if (len_minus1 <= 0xff) { if (!t && dst + 1 < end) { dst[0] = 60 << 2; dst[1] = len_minus1; } dst += 2; } else if (len_minus1 <= 0xffff) { if (!t && dst + 2 < end) { dst[0] = 61 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; } dst += 3; } else if (len_minus1 <= 0xffffff) { if (!t && dst + 3 < end) { dst[0] = 62 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; dst[3] = len_minus1 >> 16; } dst += 4; } else { if (!t && dst + 4 < end) { dst[0] = 63 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; dst[3] = len_minus1 >> 16; dst[4] = len_minus1 >> 24; } dst += 5; } for (uint32_t i = t; i <= len_minus1; i += 32) { if (dst + i < end) dst[i] = src[i]; } return dst + len_minus1 + 1; } /** * @brief Outputs a snappy copy symbol (assumed to be called by a single thread) * * @param dst Destination compressed byte stream * @param end End of compressed data buffer * @param copy_len Copy length * @param distance Copy distance * * @return Updated pointer to compressed byte stream **/ static __device__ 
uint8_t *StoreCopy(uint8_t *dst, uint8_t *end, uint32_t copy_len, uint32_t distance) { if (copy_len < 12 && distance < 2048) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset if (dst + 2 <= end) { dst[0] = ((distance & 0x700) >> 3) | ((copy_len - 4) << 2) | 0x01; dst[1] = distance; } return dst + 2; } else { // xxxxxx1x: copy with 6-bit length, 16-bit offset if (dst + 3 <= end) { dst[0] = ((copy_len - 1) << 2) | 0x2; dst[1] = distance; dst[2] = distance >> 8; } return dst + 3; } } /** * @brief Returns mask of any thread in the warp that has a hash value * equal to that of the calling thread **/ static inline __device__ uint32_t HashMatchAny(uint32_t v, uint32_t t) { #if (__CUDA_ARCH__ >= 700) return __match_any_sync(~0, v); #else uint32_t err_map = 0; for (uint32_t i = 0; i < HASH_BITS; i++, v >>= 1) { uint32_t b = v & 1; uint32_t match_b = BALLOT(b); err_map |= match_b ^ -(int32_t)b; } return ~err_map; #endif } /** * @brief Finds the first occurence of a consecutive 4-byte match in the input sequence, * or at most MAX_LITERAL_LENGTH bytes * * @param s Compressor state (copy_length set to 4 if a match is found, zero otherwise) * @param src Uncompressed buffer * @param pos0 Position in uncompressed buffer * @param t thread in warp * * @return Number of bytes before first match (literal length) **/ static __device__ uint32_t FindFourByteMatch(snap_state_s *s, const uint8_t *src, uint32_t pos0, uint32_t t) { uint32_t len = s->src_len; uint32_t pos = pos0; uint32_t maxpos = pos0 + MAX_LITERAL_LENGTH - 31; uint32_t match_mask, literal_cnt; if (t == 0) { s->copy_length = 0; } do { bool valid4 = (pos + t + 4 <= len); uint32_t data32 = (valid4) ? unaligned_load32(src + pos + t) : 0; uint32_t hash = (valid4) ? snap_hash(data32) : 0; uint32_t local_match = HashMatchAny(hash, t); uint32_t local_match_lane = 31 - __clz(local_match & ((1 << t) - 1)); uint32_t local_match_data = SHFL(data32, min(local_match_lane, t)); uint32_t offset, match; if (valid4) { if (local_match_lane < t && local_match_data == data32) { match = 1; offset = pos + local_match_lane; } else { offset = (pos & ~0xffff) | s->hash_map[hash]; if (offset >= pos) { offset = (offset >= 0x10000) ? 
offset - 0x10000 : pos; } match = (offset < pos && offset + MAX_COPY_DISTANCE >= pos + t && unaligned_load32(src + offset) == data32); } } else { match = 0; local_match = 0; offset = pos + t; } match_mask = BALLOT(match); if (match_mask != 0) { literal_cnt = __ffs(match_mask) - 1; if (t == literal_cnt) { s->copy_distance = pos + t - offset; s->copy_length = 4; } } else { literal_cnt = 32; } // Update hash up to the first 4 bytes of the copy length local_match &= (0x2 << literal_cnt) - 1; if (t <= literal_cnt && t == 31 - __clz(local_match)) { s->hash_map[hash] = pos + t; } pos += literal_cnt; } while (literal_cnt == 32 && pos < maxpos); return min(pos, len) - pos0; } /// @brief Returns the number of matching bytes for two byte sequences up to 63 bytes static __device__ uint32_t Match60(const uint8_t *src1, const uint8_t *src2, uint32_t len, uint32_t t) { uint32_t mismatch = BALLOT(t >= len || src1[t] != src2[t]); if (mismatch == 0) { mismatch = BALLOT(32 + t >= len || src1[32 + t] != src2[32 + t]); return 31 + __ffs(mismatch); // mismatch cannot be zero here if len <= 63 } else { return __ffs(mismatch) - 1; } } /** * @brief Snappy compression kernel * See http://github.com/google/snappy/blob/master/format_description.txt * * blockDim {128,1,1} * * @param[in] inputs Source/Destination buffer information per block * @param[out] outputs Compression status per block * @param[in] count Number of blocks to compress **/ extern "C" __global__ void __launch_bounds__(64) snap_kernel( const void* const* __restrict__ device_in_ptr, const uint64_t* __restrict__ device_in_bytes, void* const* __restrict__ device_out_ptr, const uint64_t* __restrict__ device_out_available_bytes, gpu_snappy_status_s * __restrict__ outputs, uint64_t* device_out_bytes) { __shared__ __align__(16) snap_state_s state_g; snap_state_s *const s = &state_g; uint32_t t = threadIdx.x; uint32_t pos; const uint8_t *src; if (!t) { const uint8_t *src = reinterpret_cast<const uint8_t *>(device_in_ptr[blockIdx.x]); uint32_t src_len = static_cast<uint32_t>(device_in_bytes[blockIdx.x]); uint8_t *dst = reinterpret_cast<uint8_t *>(device_out_ptr[blockIdx.x]); uint32_t dst_len = device_out_available_bytes ? 
static_cast<uint32_t>(device_out_available_bytes[blockIdx.x]) : 0; if (dst_len == 0) dst_len = get_max_compressed_length(src_len); uint8_t *end = dst + dst_len; s->src = src; s->src_len = src_len; s->dst_base = dst; s->end = end; while (src_len > 0x7f) { if (dst < end) { dst[0] = src_len | 0x80; } dst++; src_len >>= 7; } if (dst < end) { dst[0] = src_len; } s->dst = dst + 1; s->literal_length = 0; s->copy_length = 0; s->copy_distance = 0; } for (uint32_t i = t; i < sizeof(s->hash_map) / sizeof(uint32_t); i += 128) { *reinterpret_cast<volatile uint32_t *>(&s->hash_map[i * 2]) = 0; } __syncthreads(); src = s->src; pos = 0; while (pos < s->src_len) { uint32_t literal_len = s->literal_length; uint32_t copy_len = s->copy_length; uint32_t distance = s->copy_distance; __syncthreads(); if (t < 32) { // WARP0: Encode literals and copies uint8_t *dst = s->dst; uint8_t *end = s->end; if (literal_len > 0) { dst = StoreLiterals(dst, end, src + pos, literal_len - 1, t); pos += literal_len; } if (copy_len > 0) { if (t == 0) { dst = StoreCopy(dst, end, copy_len, distance); } pos += copy_len; } SYNCWARP(); if (t == 0) { s->dst = dst; } } else { pos += literal_len + copy_len; if (t < 32 * 2) { // WARP1: Find a match using 12-bit hashes of 4-byte blocks uint32_t t5 = t & 0x1f; literal_len = FindFourByteMatch(s, src, pos, t5); if (t5 == 0) { s->literal_length = literal_len; } copy_len = s->copy_length; if (copy_len != 0) { uint32_t match_pos = pos + literal_len + copy_len; // NOTE: copy_len is always 4 here copy_len += Match60(src + match_pos, src + match_pos - s->copy_distance, min(s->src_len - match_pos, 64 - copy_len), t5); if (t5 == 0) { s->copy_length = copy_len; } } } } __syncthreads(); } __syncthreads(); if (!t) { device_out_bytes[blockIdx.x] = s->dst - s->dst_base; if (outputs) outputs[blockIdx.x].status = (s->dst > s->end) ? 
1 : 0; } } // Not supporting streams longer than this (not what snappy is intended for) #define SNAPPY_MAX_STREAM_SIZE 0x7fffffff #define LOG2_BATCH_SIZE 5 #define BATCH_SIZE (1 << LOG2_BATCH_SIZE) #define LOG2_BATCH_COUNT 2 #define BATCH_COUNT (1 << LOG2_BATCH_COUNT) #define LOG2_PREFETCH_SIZE 10 #define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks #define PREFETCH_SECTORS 8 // How many loads in flight when prefetching #define LITERAL_SECTORS 8 // How many loads in flight when processing the literal #define LOG_CYCLECOUNT 0 /** * @brief Describes a single LZ77 symbol (single entry in batch) **/ struct unsnap_batch_s { int32_t len; // 1..64 = Number of bytes uint32_t offset; // copy distance if greater than zero or negative of literal offset in byte stream }; /** * @brief Queue structure used to exchange data between warps **/ struct unsnap_queue_s { uint32_t prefetch_wrpos; ///< Prefetcher write position uint32_t prefetch_rdpos; ///< Prefetch consumer read position int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher) int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer }; /** * @brief Input parameters for the decompression interface **/ struct gpu_input_parameters { const void *srcDevice; uint64_t srcSize; void *dstDevice; uint64_t dstSize; }; /** * @brief snappy decompression state **/ struct unsnap_state_s { const uint8_t *base; ///< base ptr of compressed stream const uint8_t *end; ///< end of compressed stream uint32_t uncompressed_size; ///< uncompressed stream size uint32_t bytes_left; ///< bytes to uncompressed remaining int32_t error; ///< current error status uint32_t tstart; ///< start time for perf logging volatile unsnap_queue_s q; ///< queue for cross-warp communication gpu_input_parameters in; ///< input parameters for current block }; /** * @brief prefetches data for the symbol decoding stage * * @param s decompression state * @param t warp lane id **/ __device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t) { const uint8_t *base = s->base; uint32_t end = (uint32_t)(s->end - base); uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base))); int32_t pos = min(align_bytes, end); int32_t blen; // Start by prefetching up to the next a 32B-aligned location if (t < pos) { s->q.buf[t] = base[t]; } blen = 0; do { SYNCWARP(); if (!t) { uint32_t minrdpos; s->q.prefetch_wrpos = pos; minrdpos = pos - min(pos, PREFETCH_SIZE - PREFETCH_SECTORS * 32u); blen = (int)min(PREFETCH_SECTORS * 32u, end - pos); for (;;) { uint32_t rdpos = s->q.prefetch_rdpos; if (rdpos >= minrdpos) break; if (s->q.prefetch_end) { blen = 0; break; } NANOSLEEP(1600); } } blen = SHFL0(blen); if (blen == PREFETCH_SECTORS * 32u) { uint8_t vals[PREFETCH_SECTORS]; for(int i = 0; i < PREFETCH_SECTORS; ++i) vals[i] = base[pos + t + i * 32u]; for(int i = 0; i < PREFETCH_SECTORS; ++i) s->q.buf[(pos + t + i * 32u) & (PREFETCH_SIZE - 1)] = vals[i]; } else { #pragma unroll 1 for(int elem = t; elem < blen; elem += 32) { s->q.buf[(pos + elem) & (PREFETCH_SIZE - 1)] = base[pos + elem]; } } pos += blen; } while (blen > 0); } /** * @brief Lookup table for get_len3_mask() * * Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of * 3-byte code lengths in the lower 4 bits, along with the total number of * bytes used for coding the four lengths in the upper 4 bits. 
* The upper 4-bit value could also be obtained by 8+__popc(mask4) * * for (uint32_t k = 0; k < 1024; k++) * { * for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++) * { * v |= (b & 1) << i; * n += (b & 1) + 2; * b >>= (b & 1) + 2; * } * k_len3lut[k] = v | (n << 4); * } * **/ static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = { 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 
0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf}; /** * @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte * code length, given an input mask of up to 96 bits. 
* * Implemented by doing 8 consecutive lookups, building the result 4-bit at a time **/ inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2) { uint32_t m, v, m4, n; v = v0; m4 = k_len3lut[v & 0x3ff]; m = m4 & 0xf; n = m4 >> 4; // 8..12 v = v0 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 4; n += m4 >> 4; // 16..24 v = __funnelshift_r(v0, v1, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 8; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 12; n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16 v1 = __funnelshift_r(v1, v2, n); v2 >>= n; v = v1; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 16; n = m4 >> 4; // 8..12 v = v1 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 20; n += m4 >> 4; // 16..24 v = __funnelshift_r(v1, v2, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 24; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 28; return m; } /** * @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length * minus 2, given two input masks each containing bit0 or bit1 of the corresponding * code length minus 2 for up to 32 bytes **/ inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1) { uint32_t m; m = (v1 & 1) * 2 + (v0 & 1); v0 >>= (m + 2); v1 >>= (m + 1); for (uint32_t i = 1; i < 16; i++) { uint32_t m2 = (v1 & 2) | (v0 & 1); uint32_t n = m2 + 2; m |= m2 << (i * 2); v0 >>= n; v1 >>= n; } return m; } #define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)] /** * @brief decode symbols and output LZ77 batches (single-warp) * * @param s decompression state * @param t warp lane id **/ __device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t) { uint32_t cur = 0; uint32_t end = static_cast<uint32_t>(s->end - s->base); uint32_t bytes_left = s->uncompressed_size; uint32_t dst_pos = 0; int32_t batch = 0; for (;;) { int32_t batch_len = 0; volatile unsnap_batch_s *b; // Wait for prefetcher if (t == 0) { s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } b = &s->q.batch[batch * BATCH_SIZE]; } // Process small symbols in parallel: for data that does not get good compression, // the stream will consist of a large number of short literals (1-byte or 2-byte) // followed by short repeat runs. This results in many 2-byte or 3-byte symbols // that can all be decoded in parallel once we know the symbol length. { uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask; uint32_t b0; cur = SHFL0(cur); cur_t = cur + t; b0 = READ_BYTE(cur_t); v0 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 32); v1 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 64); v2 = BALLOT((b0 == 4) || (b0 & 2)); len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0); cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1)); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0); short_sym_mask = BALLOT(is_long_sym); batch_len = 0; b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b))); if (!(short_sym_mask & 1)) { batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0); if (batch_len != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_len) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? 
READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[t].len = blen; b[t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); } if (batch_len != 0) { blen = SHFL(blen, batch_len - 1); cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1); if (t == 0) { dst_pos += blen; bytes_left -= blen; } } } } // Check if the batch was stopped by a 3-byte or 4-byte literal if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) { // If so, run a slower version of the above that can also handle 3/4-byte literal sequences uint32_t batch_add; do { uint32_t clen, mask_t; cur_t = cur + t; b0 = READ_BYTE(cur_t); clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2 v0 = BALLOT(clen & 1); v1 = BALLOT((clen >> 1) & 1); len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0); mask_t = (1 << (2 * t)) - 1; cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) + __popc((len3_mask & 0x55555555) & mask_t); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) || (batch_len + t >= BATCH_SIZE); batch_add = __ffs(BALLOT(is_long_sym)) - 1; if (batch_add != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_add) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[batch_len + t].len = blen; b[batch_len + t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); } if (batch_add != 0) { blen = SHFL(blen, batch_add - 1); cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3); if (t == 0) { dst_pos += blen; bytes_left -= blen; } batch_len += batch_add; } } } while (batch_add >= 6 && batch_len < BATCH_SIZE - 2); } } if (t == 0) { while (bytes_left > 0 && batch_len < BATCH_SIZE) { uint32_t blen, offset; uint8_t b0 = READ_BYTE(cur); if (b0 & 3) { uint8_t b1 = READ_BYTE(cur + 1); if (!(b0 & 2)) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset offset = ((b0 & 0xe0) << 3) | b1; blen = ((b0 >> 2) & 7) + 4; cur += 2; } else { // xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset offset = b1 | (READ_BYTE(cur + 2) << 8); if (b0 & 1) // 4-byte offset { offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24); cur += 5; } else { cur += 3; } blen = (b0 >> 2) + 1; } dst_pos += blen; if (offset - 1u >= dst_pos || bytes_left < blen) break; bytes_left -= blen; } else if (b0 < 4 * 4) { // 0000xx00: short literal blen = (b0 >> 2) + 1; offset = -(int32_t)(cur + 1); cur += 1 + blen; dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } else { // xxxxxx00: literal blen = b0 >> 2; if (blen >= 60) { uint32_t num_bytes = blen - 59; blen = READ_BYTE(cur + 1); if (num_bytes > 1) { blen |= READ_BYTE(cur + 2) << 8; if (num_bytes > 2) { blen |= READ_BYTE(cur + 3) << 16; if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; } } } cur += num_bytes; } cur += 1; 
blen += 1; offset = -(int32_t)cur; cur += blen; // Wait for prefetcher s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } b[batch_len].len = blen; b[batch_len].offset = offset; batch_len++; } if (batch_len != 0) { s->q.batch_len[batch] = batch_len; batch = (batch + 1) & (BATCH_COUNT - 1); } } batch_len = SHFL0(batch_len); if (t == 0) { while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); } } if (batch_len != BATCH_SIZE) { break; } } if (!t) { s->q.prefetch_end = 1; s->q.batch_len[batch] = -1; s->bytes_left = bytes_left; if (bytes_left != 0) { s->error = -2; } } } /** * @brief process LZ77 symbols and output uncompressed stream * * @param s decompression state * @param t thread id within participating group (lane id) * * NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that *would result in out-of-bounds accesses) **/ __device__ void snappy_process_symbols(unsnap_state_s *s, int t) { const uint8_t *literal_base = s->base; uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice); int batch = 0; do { volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE]; int32_t batch_len, blen_t, dist_t; if (t == 0) { while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); } } else { batch_len = 0; } batch_len = SHFL0(batch_len); if (batch_len <= 0) { break; } if (t < batch_len) { blen_t = b[t].len; dist_t = b[t].offset; } else { blen_t = dist_t = 0; } // Try to combine as many small entries as possible, but try to avoid doing that // if we see a small repeat distance 8 bytes or less if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) { uint32_t n; do { uint32_t bofs = WarpReducePos32(blen_t, t); uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs); uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 1 << bofs : 0); n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)), (uint32_t)batch_len); if (n != 0) { uint32_t it = __popc(start_mask & ((2 << t) - 1)); uint32_t tr = t - SHFL(bofs - blen_t, it); int32_t dist = SHFL(dist_t, it); if (it < n) { const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist); out[t] = *src; } out += SHFL(bofs, n - 1); blen_t = SHFL(blen_t, (n + t) & 0x1f); dist_t = SHFL(dist_t, (n + t) & 0x1f); batch_len -= n; } } while (n >= 4); } for (int i = 0; i < batch_len; i++) { int32_t blen = SHFL(blen_t, i); int32_t dist = SHFL(dist_t, i); int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32; // Try to combine consecutive small entries if they are independent if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) { int32_t dist2 = SHFL(dist_t, i + 1); if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) { int32_t d; if (t < blen) { d = dist; } else { dist = dist2; d = (dist2 <= 0) ? dist2 + blen : dist2; } blen += blen2; if (t < blen) { const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d); out[t] = src[t]; } out += blen; i++; continue; } } if (dist > 0) { // Copy uint8_t b0, b1; if (t < blen) { uint32_t pos = t; const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist; b0 = *src; } if (32 + t < blen) { uint32_t pos = 32 + t; const uint8_t *src = out + ((pos >= dist) ? 
(pos % dist) : pos) - dist; b1 = *src; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } else { // Literal uint8_t b[LITERAL_SECTORS]; dist = -dist; while (blen >= LITERAL_SECTORS * 32u) { for(int i = 0; i < LITERAL_SECTORS; ++i) b[i] = literal_base[dist + i * 32u + t]; for(int i = 0; i < LITERAL_SECTORS; ++i) out[i * 32u + t] = b[i]; dist += LITERAL_SECTORS * 32u; out += LITERAL_SECTORS * 32u; blen -= LITERAL_SECTORS * 32u; } for(int i = 0; i < LITERAL_SECTORS; ++i) if (i * 32u + t < blen) b[i] = literal_base[dist + i * 32u + t]; for(int i = 0; i < LITERAL_SECTORS; ++i) if (i * 32u + t < blen) out[i * 32u + t] = b[i]; } out += blen; } SYNCWARP(); if (t == 0) { s->q.batch_len[batch] = 0; } batch = (batch + 1) & (BATCH_COUNT - 1); } while (1); } /** * @brief Snappy decompression kernel * See http://github.com/google/snappy/blob/master/format_description.txt * * blockDim {128,1,1} * * @param[in] inputs Source & destination information per block * @param[out] outputs Decompression status per block **/ extern "C" __global__ void __launch_bounds__(96) unsnap_kernel( const void* const* __restrict__ device_in_ptr, const uint64_t* __restrict__ device_in_bytes, void* const* __restrict__ device_out_ptr, const uint64_t* __restrict__ device_out_available_bytes, gpu_snappy_status_s * __restrict__ outputs, uint64_t* __restrict__ device_out_bytes) { __shared__ __align__(16) unsnap_state_s state_g; int t = threadIdx.x; unsnap_state_s *s = &state_g; int strm_id = blockIdx.x; if (!t) { s->in.srcDevice = device_in_ptr[strm_id]; s->in.srcSize = device_in_bytes[strm_id]; s->in.dstDevice = device_out_ptr[strm_id]; s->in.dstSize = device_out_available_bytes ? device_out_available_bytes[strm_id] : 0; } if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; } __syncthreads(); if (!t) { const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice); const uint8_t *end = cur + s->in.srcSize; s->error = 0; #if LOG_CYCLECOUNT s->tstart = clock(); #endif if (cur < end) { // Read uncompressed size (varint), limited to 32-bit uint32_t uncompressed_size = *cur++; if (uncompressed_size > 0x7f) { uint32_t c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & 0x7f) | (c << 7); if (uncompressed_size >= (0x80 << 7)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14); if (uncompressed_size >= (0x80 << 14)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21); if (uncompressed_size >= (0x80 << 21)) { c = (cur < end) ? 
*cur++ : 0; if (c < 0x8) uncompressed_size = (uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 28); else s->error = -1; } } } } s->uncompressed_size = uncompressed_size; s->bytes_left = uncompressed_size; s->base = cur; s->end = end; if (s->in.dstSize == 0) s->in.dstSize = uncompressed_size; if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) { s->error = -1; } } else { s->error = -1; } s->q.prefetch_end = 0; s->q.prefetch_wrpos = 0; s->q.prefetch_rdpos = 0; } __syncthreads(); if (!s->error) { if (t < 32) { // WARP0: decode lengths and offsets snappy_decode_symbols(s, t); } else if (t < 64) { // WARP1: prefetch byte stream for WARP0 snappy_prefetch_bytestream(s, t & 0x1f); } else if (t < 96) { // WARP2: LZ77 snappy_process_symbols(s, t & 0x1f); } __syncthreads(); } if (!t) { if (device_out_bytes) device_out_bytes[strm_id] = s->uncompressed_size - s->bytes_left; if (outputs) outputs[strm_id].status = s->error; } } hipError_t gpu_snap( const void* const* device_in_ptr, const size_t* device_in_bytes, void* const* device_out_ptr, const size_t* device_out_available_bytes, gpu_snappy_status_s *outputs, size_t* device_out_bytes, int count, hipStream_t stream) { dim3 dim_block(64, 1); // 2 warps per stream, 1 stream per block dim3 dim_grid(count, 1); if (count > 0) {hipLaunchKernelGGL(( snap_kernel), dim3(dim_grid), dim3(dim_block), 0, stream, device_in_ptr, device_in_bytes, device_out_ptr, device_out_available_bytes, outputs, device_out_bytes); } return hipGetLastError(); } hipError_t gpu_unsnap( const void* const* device_in_ptr, const size_t* device_in_bytes, void* const* device_out_ptr, const size_t* device_out_available_bytes, gpu_snappy_status_s *outputs, size_t* device_out_bytes, int count, hipStream_t stream) { uint32_t count32 = (count > 0) ? count : 0; dim3 dim_block(96, 1); // 3 warps per stream, 1 stream per block dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count hipLaunchKernelGGL(( unsnap_kernel), dim3(dim_grid), dim3(dim_block), 0, stream, device_in_ptr, device_in_bytes, device_out_ptr, device_out_available_bytes, outputs, device_out_bytes); return hipGetLastError(); } } // nvcomp namespace
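In snap_kernel above, the uncompressed block size is written in front of the compressed stream as a little-endian base-128 varint (the while (src_len > 0x7f) loop), and unsnap_kernel re-parses it, limited to 32 bits, before decoding symbols. The sketch below is a host-side restatement of that preamble format for reference only; the helper names are illustrative and are not part of nvcomp.

#include <cstdint>
#include <cstdio>

// Mirrors the "while (src_len > 0x7f)" loop in snap_kernel: low 7 bits first,
// continuation bit (0x80) set on every byte except the last.
static int size_preamble_encode(uint32_t value, uint8_t *out) {
    int n = 0;
    while (value > 0x7f) {
        out[n++] = (uint8_t)(value | 0x80);
        value >>= 7;
    }
    out[n++] = (uint8_t)value;
    return n;  // 1..5 bytes for a 32-bit size
}

// Mirrors the nested decode at the top of unsnap_kernel (32-bit limit).
static int size_preamble_decode(const uint8_t *in, int in_len, uint32_t *value) {
    uint32_t v = 0;
    for (int i = 0; i < in_len && i < 5; i++) {
        v |= (uint32_t)(in[i] & 0x7f) << (7 * i);
        if ((in[i] & 0x80) == 0) { *value = v; return i + 1; }
    }
    return -1;  // no terminating byte within the 5-byte limit
}

int main() {
    uint8_t buf[5];
    uint32_t decoded = 0;
    int written = size_preamble_encode(100000, buf);
    int read = size_preamble_decode(buf, written, &decoded);
    printf("wrote %d byte(s), read %d byte(s), size = %u\n", written, read, decoded);
    return 0;
}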
9e68f0e739992543643f70c5a12feecb5e2b272b.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "SnappyBlockUtils.cuh" #include "SnappyKernels.h" namespace nvcomp { #define HASH_BITS 12 // TBD: Tentatively limits to 2-byte codes to prevent long copy search followed by long literal // encoding #define MAX_LITERAL_LENGTH 256 #define MAX_COPY_LENGTH 64 // Syntax limit #define MAX_COPY_DISTANCE 32768 // Matches encoder limit as described in snappy format description /** * @brief snappy compressor state **/ struct snap_state_s { const uint8_t *src; ///< Ptr to uncompressed data uint32_t src_len; ///< Uncompressed data length uint8_t *dst_base; ///< Base ptr to output compressed data uint8_t *dst; ///< Current ptr to uncompressed data uint8_t *end; ///< End of uncompressed data buffer volatile uint32_t literal_length; ///< Number of literal bytes volatile uint32_t copy_length; ///< Number of copy bytes volatile uint32_t copy_distance; ///< Distance for copy bytes uint16_t hash_map[1 << HASH_BITS]; ///< Low 16-bit offset from hash }; static inline __device__ uint32_t get_max_compressed_length(uint32_t source_bytes) { // This is an estimate from the original snappy library return 32 + source_bytes + source_bytes / 6; } /** * @brief 12-bit hash from four consecutive bytes **/ static inline __device__ uint32_t snap_hash(uint32_t v) { return (v * ((1 << 20) + (0x2a00) + (0x6a) + 1)) >> (32 - HASH_BITS); } /** * @brief Outputs a snappy literal symbol * * @param dst Destination compressed byte stream * @param end End of compressed data buffer * @param src Pointer to literal bytes * @param len_minus1 Number of literal bytes minus 1 * @param t Thread in warp * * @return Updated pointer to compressed byte stream **/ static __device__ uint8_t *StoreLiterals( uint8_t *dst, uint8_t *end, const uint8_t *src, uint32_t len_minus1, uint32_t t) { if (len_minus1 < 60) { if (!t && dst < end) dst[0] = (len_minus1 << 2); dst += 1; } else if (len_minus1 <= 0xff) { if (!t && dst + 1 < end) { dst[0] = 60 << 2; dst[1] = len_minus1; } dst += 2; } else if (len_minus1 <= 0xffff) { if (!t && dst + 2 < end) { dst[0] = 61 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; } dst += 3; } else if (len_minus1 <= 0xffffff) { if (!t && dst + 3 < end) { dst[0] = 62 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; dst[3] = len_minus1 >> 16; } dst += 4; } else { if (!t && dst + 4 < end) { dst[0] = 63 << 2; dst[1] = len_minus1; dst[2] = len_minus1 >> 8; dst[3] = len_minus1 >> 16; dst[4] = len_minus1 >> 24; } dst += 5; } for (uint32_t i = t; i <= len_minus1; i += 32) { if (dst + i < end) dst[i] = src[i]; } return dst + len_minus1 + 1; } /** * @brief Outputs a snappy copy symbol (assumed to be called by a single thread) * * @param dst Destination compressed byte stream * @param end End of compressed data buffer * @param copy_len Copy length * @param distance Copy distance * * @return Updated pointer to compressed byte stream **/ static __device__ uint8_t *StoreCopy(uint8_t *dst, uint8_t *end, uint32_t copy_len, uint32_t distance) { if 
(copy_len < 12 && distance < 2048) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset if (dst + 2 <= end) { dst[0] = ((distance & 0x700) >> 3) | ((copy_len - 4) << 2) | 0x01; dst[1] = distance; } return dst + 2; } else { // xxxxxx1x: copy with 6-bit length, 16-bit offset if (dst + 3 <= end) { dst[0] = ((copy_len - 1) << 2) | 0x2; dst[1] = distance; dst[2] = distance >> 8; } return dst + 3; } } /** * @brief Returns mask of any thread in the warp that has a hash value * equal to that of the calling thread **/ static inline __device__ uint32_t HashMatchAny(uint32_t v, uint32_t t) { #if (__CUDA_ARCH__ >= 700) return __match_any_sync(~0, v); #else uint32_t err_map = 0; for (uint32_t i = 0; i < HASH_BITS; i++, v >>= 1) { uint32_t b = v & 1; uint32_t match_b = BALLOT(b); err_map |= match_b ^ -(int32_t)b; } return ~err_map; #endif } /** * @brief Finds the first occurence of a consecutive 4-byte match in the input sequence, * or at most MAX_LITERAL_LENGTH bytes * * @param s Compressor state (copy_length set to 4 if a match is found, zero otherwise) * @param src Uncompressed buffer * @param pos0 Position in uncompressed buffer * @param t thread in warp * * @return Number of bytes before first match (literal length) **/ static __device__ uint32_t FindFourByteMatch(snap_state_s *s, const uint8_t *src, uint32_t pos0, uint32_t t) { uint32_t len = s->src_len; uint32_t pos = pos0; uint32_t maxpos = pos0 + MAX_LITERAL_LENGTH - 31; uint32_t match_mask, literal_cnt; if (t == 0) { s->copy_length = 0; } do { bool valid4 = (pos + t + 4 <= len); uint32_t data32 = (valid4) ? unaligned_load32(src + pos + t) : 0; uint32_t hash = (valid4) ? snap_hash(data32) : 0; uint32_t local_match = HashMatchAny(hash, t); uint32_t local_match_lane = 31 - __clz(local_match & ((1 << t) - 1)); uint32_t local_match_data = SHFL(data32, min(local_match_lane, t)); uint32_t offset, match; if (valid4) { if (local_match_lane < t && local_match_data == data32) { match = 1; offset = pos + local_match_lane; } else { offset = (pos & ~0xffff) | s->hash_map[hash]; if (offset >= pos) { offset = (offset >= 0x10000) ? 
offset - 0x10000 : pos; } match = (offset < pos && offset + MAX_COPY_DISTANCE >= pos + t && unaligned_load32(src + offset) == data32); } } else { match = 0; local_match = 0; offset = pos + t; } match_mask = BALLOT(match); if (match_mask != 0) { literal_cnt = __ffs(match_mask) - 1; if (t == literal_cnt) { s->copy_distance = pos + t - offset; s->copy_length = 4; } } else { literal_cnt = 32; } // Update hash up to the first 4 bytes of the copy length local_match &= (0x2 << literal_cnt) - 1; if (t <= literal_cnt && t == 31 - __clz(local_match)) { s->hash_map[hash] = pos + t; } pos += literal_cnt; } while (literal_cnt == 32 && pos < maxpos); return min(pos, len) - pos0; } /// @brief Returns the number of matching bytes for two byte sequences up to 63 bytes static __device__ uint32_t Match60(const uint8_t *src1, const uint8_t *src2, uint32_t len, uint32_t t) { uint32_t mismatch = BALLOT(t >= len || src1[t] != src2[t]); if (mismatch == 0) { mismatch = BALLOT(32 + t >= len || src1[32 + t] != src2[32 + t]); return 31 + __ffs(mismatch); // mismatch cannot be zero here if len <= 63 } else { return __ffs(mismatch) - 1; } } /** * @brief Snappy compression kernel * See http://github.com/google/snappy/blob/master/format_description.txt * * blockDim {128,1,1} * * @param[in] inputs Source/Destination buffer information per block * @param[out] outputs Compression status per block * @param[in] count Number of blocks to compress **/ extern "C" __global__ void __launch_bounds__(64) snap_kernel( const void* const* __restrict__ device_in_ptr, const uint64_t* __restrict__ device_in_bytes, void* const* __restrict__ device_out_ptr, const uint64_t* __restrict__ device_out_available_bytes, gpu_snappy_status_s * __restrict__ outputs, uint64_t* device_out_bytes) { __shared__ __align__(16) snap_state_s state_g; snap_state_s *const s = &state_g; uint32_t t = threadIdx.x; uint32_t pos; const uint8_t *src; if (!t) { const uint8_t *src = reinterpret_cast<const uint8_t *>(device_in_ptr[blockIdx.x]); uint32_t src_len = static_cast<uint32_t>(device_in_bytes[blockIdx.x]); uint8_t *dst = reinterpret_cast<uint8_t *>(device_out_ptr[blockIdx.x]); uint32_t dst_len = device_out_available_bytes ? 
static_cast<uint32_t>(device_out_available_bytes[blockIdx.x]) : 0; if (dst_len == 0) dst_len = get_max_compressed_length(src_len); uint8_t *end = dst + dst_len; s->src = src; s->src_len = src_len; s->dst_base = dst; s->end = end; while (src_len > 0x7f) { if (dst < end) { dst[0] = src_len | 0x80; } dst++; src_len >>= 7; } if (dst < end) { dst[0] = src_len; } s->dst = dst + 1; s->literal_length = 0; s->copy_length = 0; s->copy_distance = 0; } for (uint32_t i = t; i < sizeof(s->hash_map) / sizeof(uint32_t); i += 128) { *reinterpret_cast<volatile uint32_t *>(&s->hash_map[i * 2]) = 0; } __syncthreads(); src = s->src; pos = 0; while (pos < s->src_len) { uint32_t literal_len = s->literal_length; uint32_t copy_len = s->copy_length; uint32_t distance = s->copy_distance; __syncthreads(); if (t < 32) { // WARP0: Encode literals and copies uint8_t *dst = s->dst; uint8_t *end = s->end; if (literal_len > 0) { dst = StoreLiterals(dst, end, src + pos, literal_len - 1, t); pos += literal_len; } if (copy_len > 0) { if (t == 0) { dst = StoreCopy(dst, end, copy_len, distance); } pos += copy_len; } SYNCWARP(); if (t == 0) { s->dst = dst; } } else { pos += literal_len + copy_len; if (t < 32 * 2) { // WARP1: Find a match using 12-bit hashes of 4-byte blocks uint32_t t5 = t & 0x1f; literal_len = FindFourByteMatch(s, src, pos, t5); if (t5 == 0) { s->literal_length = literal_len; } copy_len = s->copy_length; if (copy_len != 0) { uint32_t match_pos = pos + literal_len + copy_len; // NOTE: copy_len is always 4 here copy_len += Match60(src + match_pos, src + match_pos - s->copy_distance, min(s->src_len - match_pos, 64 - copy_len), t5); if (t5 == 0) { s->copy_length = copy_len; } } } } __syncthreads(); } __syncthreads(); if (!t) { device_out_bytes[blockIdx.x] = s->dst - s->dst_base; if (outputs) outputs[blockIdx.x].status = (s->dst > s->end) ? 
1 : 0; } } // Not supporting streams longer than this (not what snappy is intended for) #define SNAPPY_MAX_STREAM_SIZE 0x7fffffff #define LOG2_BATCH_SIZE 5 #define BATCH_SIZE (1 << LOG2_BATCH_SIZE) #define LOG2_BATCH_COUNT 2 #define BATCH_COUNT (1 << LOG2_BATCH_COUNT) #define LOG2_PREFETCH_SIZE 10 #define PREFETCH_SIZE (1 << LOG2_PREFETCH_SIZE) // 512B, in 32B chunks #define PREFETCH_SECTORS 8 // How many loads in flight when prefetching #define LITERAL_SECTORS 8 // How many loads in flight when processing the literal #define LOG_CYCLECOUNT 0 /** * @brief Describes a single LZ77 symbol (single entry in batch) **/ struct unsnap_batch_s { int32_t len; // 1..64 = Number of bytes uint32_t offset; // copy distance if greater than zero or negative of literal offset in byte stream }; /** * @brief Queue structure used to exchange data between warps **/ struct unsnap_queue_s { uint32_t prefetch_wrpos; ///< Prefetcher write position uint32_t prefetch_rdpos; ///< Prefetch consumer read position int32_t prefetch_end; ///< Prefetch enable flag (nonzero stops prefetcher) int32_t batch_len[BATCH_COUNT]; ///< Length of each batch - <0:end, 0:not ready, >0:symbol count unsnap_batch_s batch[BATCH_COUNT * BATCH_SIZE]; ///< LZ77 batch data uint8_t buf[PREFETCH_SIZE]; ///< Prefetch buffer }; /** * @brief Input parameters for the decompression interface **/ struct gpu_input_parameters { const void *srcDevice; uint64_t srcSize; void *dstDevice; uint64_t dstSize; }; /** * @brief snappy decompression state **/ struct unsnap_state_s { const uint8_t *base; ///< base ptr of compressed stream const uint8_t *end; ///< end of compressed stream uint32_t uncompressed_size; ///< uncompressed stream size uint32_t bytes_left; ///< bytes to uncompressed remaining int32_t error; ///< current error status uint32_t tstart; ///< start time for perf logging volatile unsnap_queue_s q; ///< queue for cross-warp communication gpu_input_parameters in; ///< input parameters for current block }; /** * @brief prefetches data for the symbol decoding stage * * @param s decompression state * @param t warp lane id **/ __device__ void snappy_prefetch_bytestream(unsnap_state_s *s, int t) { const uint8_t *base = s->base; uint32_t end = (uint32_t)(s->end - base); uint32_t align_bytes = (uint32_t)(0x20 - (0x1f & reinterpret_cast<uintptr_t>(base))); int32_t pos = min(align_bytes, end); int32_t blen; // Start by prefetching up to the next a 32B-aligned location if (t < pos) { s->q.buf[t] = base[t]; } blen = 0; do { SYNCWARP(); if (!t) { uint32_t minrdpos; s->q.prefetch_wrpos = pos; minrdpos = pos - min(pos, PREFETCH_SIZE - PREFETCH_SECTORS * 32u); blen = (int)min(PREFETCH_SECTORS * 32u, end - pos); for (;;) { uint32_t rdpos = s->q.prefetch_rdpos; if (rdpos >= minrdpos) break; if (s->q.prefetch_end) { blen = 0; break; } NANOSLEEP(1600); } } blen = SHFL0(blen); if (blen == PREFETCH_SECTORS * 32u) { uint8_t vals[PREFETCH_SECTORS]; for(int i = 0; i < PREFETCH_SECTORS; ++i) vals[i] = base[pos + t + i * 32u]; for(int i = 0; i < PREFETCH_SECTORS; ++i) s->q.buf[(pos + t + i * 32u) & (PREFETCH_SIZE - 1)] = vals[i]; } else { #pragma unroll 1 for(int elem = t; elem < blen; elem += 32) { s->q.buf[(pos + elem) & (PREFETCH_SIZE - 1)] = base[pos + elem]; } } pos += blen; } while (blen > 0); } /** * @brief Lookup table for get_len3_mask() * * Indexed by a 10-bit pattern, contains the corresponding 4-bit mask of * 3-byte code lengths in the lower 4 bits, along with the total number of * bytes used for coding the four lengths in the upper 4 bits. 
* The upper 4-bit value could also be obtained by 8+__popc(mask4) * * for (uint32_t k = 0; k < 1024; k++) * { * for (uint32_t i = 0, v = 0, b = k, n = 0; i < 4; i++) * { * v |= (b & 1) << i; * n += (b & 1) + 2; * b >>= (b & 1) + 2; * } * k_len3lut[k] = v | (n << 4); * } * **/ static const uint8_t __device__ __constant__ k_len3lut[1 << 10] = { 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xb7, 0x94, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xb7, 0x98, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xb7, 0xac, 0xb7, 0xa6, 0xb7, 0xa6, 0xb7, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xb7, 0x98, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xb7, 0x94, 0xb7, 0x92, 0xb7, 0x92, 0xb7, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xb7, 0x94, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xb7, 0x98, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xb7, 0xac, 0xb7, 0xaa, 0xb7, 0xaa, 0xb7, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xb7, 0x98, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xb7, 0xac, 0xb7, 0xbe, 0xb7, 0xbe, 0xb7, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xa3, 0x80, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 
0x91, 0x94, 0xa3, 0x94, 0xa3, 0x92, 0xa3, 0x92, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xa3, 0x94, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x94, 0xa5, 0x94, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x94, 0xcf, 0x94, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xa3, 0x80, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xa3, 0xac, 0xa3, 0xaa, 0xa3, 0xaa, 0xa3, 0x80, 0xa5, 0x80, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x80, 0xa3, 0x80, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xa3, 0xac, 0xa3, 0xa6, 0xa3, 0xa6, 0xa3, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0x98, 0xa5, 0x98, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0x98, 0xcf, 0x98, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0xac, 0xa5, 0xac, 0xa5, 0xa6, 0xa5, 0xa6, 0xa5, 0xac, 0xcf, 0xac, 0xcf, 0xa6, 0xcf, 0xa6, 0xcf, 0x80, 0x91, 0x80, 0x91, 0x92, 0x91, 0x92, 0x91, 0x80, 0xbb, 0x80, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xbb, 0x94, 0xbb, 0x92, 0xbb, 0x92, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xbb, 0x94, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0x91, 0x98, 0x91, 0x92, 0x91, 0x92, 0x91, 0x98, 0xcf, 0x98, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x94, 0x91, 0x94, 0x91, 0x92, 0x91, 0x92, 0x91, 0x94, 0xcf, 0x94, 0xcf, 0x92, 0xcf, 0x92, 0xcf, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0x94, 0xbd, 0x94, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x94, 0xcf, 0x94, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0x80, 0xa9, 0x80, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x80, 0xbb, 0x80, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xbb, 0xac, 0xbb, 0xaa, 0xbb, 0xaa, 0xbb, 0x80, 0xbd, 0x80, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x80, 0xbb, 0x80, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xbb, 0xac, 0xbb, 0xbe, 0xbb, 0xbe, 0xbb, 0x98, 0xa9, 0x98, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0x98, 0xcf, 0x98, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0xac, 0xa9, 0xac, 0xa9, 0xaa, 0xa9, 0xaa, 0xa9, 0xac, 0xcf, 0xac, 0xcf, 0xaa, 0xcf, 0xaa, 0xcf, 0x98, 0xbd, 0x98, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0x98, 0xcf, 0x98, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf, 0xac, 0xbd, 0xac, 0xbd, 0xbe, 0xbd, 0xbe, 0xbd, 0xac, 0xcf, 0xac, 0xcf, 0xbe, 0xcf, 0xbe, 0xcf}; /** * @brief Returns a 32-bit mask where 1 means 3-byte code length and 0 means 2-byte * code length, given an input mask of up to 96 bits. 
* * Implemented by doing 8 consecutive lookups, building the result 4-bit at a time **/ inline __device__ uint32_t get_len3_mask(uint32_t v0, uint32_t v1, uint32_t v2) { uint32_t m, v, m4, n; v = v0; m4 = k_len3lut[v & 0x3ff]; m = m4 & 0xf; n = m4 >> 4; // 8..12 v = v0 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 4; n += m4 >> 4; // 16..24 v = __funnelshift_r(v0, v1, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 8; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 12; n = (n + (m4 >> 4)) & 0x1f; // (32..48) % 32 = 0..16 v1 = __funnelshift_r(v1, v2, n); v2 >>= n; v = v1; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 16; n = m4 >> 4; // 8..12 v = v1 >> n; m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 20; n += m4 >> 4; // 16..24 v = __funnelshift_r(v1, v2, n); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 24; n += m4 >> 4; // 24..36 v >>= (m4 >> 4); m4 = k_len3lut[v & 0x3ff]; m |= (m4 & 0xf) << 28; return m; } /** * @brief Returns a 32-bit mask where each 2-bit pair contains the symbol length * minus 2, given two input masks each containing bit0 or bit1 of the corresponding * code length minus 2 for up to 32 bytes **/ inline __device__ uint32_t get_len5_mask(uint32_t v0, uint32_t v1) { uint32_t m; m = (v1 & 1) * 2 + (v0 & 1); v0 >>= (m + 2); v1 >>= (m + 1); for (uint32_t i = 1; i < 16; i++) { uint32_t m2 = (v1 & 2) | (v0 & 1); uint32_t n = m2 + 2; m |= m2 << (i * 2); v0 >>= n; v1 >>= n; } return m; } #define READ_BYTE(pos) s->q.buf[(pos) & (PREFETCH_SIZE - 1)] /** * @brief decode symbols and output LZ77 batches (single-warp) * * @param s decompression state * @param t warp lane id **/ __device__ void snappy_decode_symbols(unsnap_state_s *s, uint32_t t) { uint32_t cur = 0; uint32_t end = static_cast<uint32_t>(s->end - s->base); uint32_t bytes_left = s->uncompressed_size; uint32_t dst_pos = 0; int32_t batch = 0; for (;;) { int32_t batch_len = 0; volatile unsnap_batch_s *b; // Wait for prefetcher if (t == 0) { s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } b = &s->q.batch[batch * BATCH_SIZE]; } // Process small symbols in parallel: for data that does not get good compression, // the stream will consist of a large number of short literals (1-byte or 2-byte) // followed by short repeat runs. This results in many 2-byte or 3-byte symbols // that can all be decoded in parallel once we know the symbol length. { uint32_t v0, v1, v2, len3_mask, cur_t, is_long_sym, short_sym_mask; uint32_t b0; cur = SHFL0(cur); cur_t = cur + t; b0 = READ_BYTE(cur_t); v0 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 32); v1 = BALLOT((b0 == 4) || (b0 & 2)); b0 = READ_BYTE(cur_t + 64); v2 = BALLOT((b0 == 4) || (b0 & 2)); len3_mask = SHFL0((t == 0) ? get_len3_mask(v0, v1, v2) : 0); cur_t = cur + 2 * t + __popc(len3_mask & ((1 << t) - 1)); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & ~4) != 0) && (((b0 + 1) & 2) == 0); short_sym_mask = BALLOT(is_long_sym); batch_len = 0; b = reinterpret_cast<volatile unsnap_batch_s *>(SHFL0(reinterpret_cast<uintptr_t>(b))); if (!(short_sym_mask & 1)) { batch_len = SHFL0((t == 0) ? (short_sym_mask) ? __ffs(short_sym_mask) - 1 : 32 : 0); if (batch_len != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_len) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? 
READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[t].len = blen; b[t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_len = min(batch_len, short_sym_mask - 1); } if (batch_len != 0) { blen = SHFL(blen, batch_len - 1); cur = SHFL(cur_t, batch_len - 1) + 2 + ((len3_mask >> (batch_len - 1)) & 1); if (t == 0) { dst_pos += blen; bytes_left -= blen; } } } } // Check if the batch was stopped by a 3-byte or 4-byte literal if (batch_len < BATCH_SIZE - 2 && SHFL(b0 & ~4, batch_len) == 8) { // If so, run a slower version of the above that can also handle 3/4-byte literal sequences uint32_t batch_add; do { uint32_t clen, mask_t; cur_t = cur + t; b0 = READ_BYTE(cur_t); clen = (b0 & 3) ? (b0 & 2) ? 1 : 0 : (b0 >> 2); // symbol length minus 2 v0 = BALLOT(clen & 1); v1 = BALLOT((clen >> 1) & 1); len3_mask = SHFL0((t == 0) ? get_len5_mask(v0, v1) : 0); mask_t = (1 << (2 * t)) - 1; cur_t = cur + 2 * t + 2 * __popc((len3_mask & 0xaaaaaaaa) & mask_t) + __popc((len3_mask & 0x55555555) & mask_t); b0 = READ_BYTE(cur_t); is_long_sym = ((b0 & 3) ? ((b0 & 3) == 3) : (b0 > 3 * 4)) || (cur_t >= cur + 32) || (batch_len + t >= BATCH_SIZE); batch_add = __ffs(BALLOT(is_long_sym)) - 1; if (batch_add != 0) { uint32_t blen = 0; int32_t ofs = 0; if (t < batch_add) { blen = (b0 & 1) ? ((b0 >> 2) & 7) + 4 : ((b0 >> 2) + 1); ofs = (b0 & 1) ? ((b0 & 0xe0) << 3) | READ_BYTE(cur_t + 1) : (b0 & 2) ? READ_BYTE(cur_t + 1) | (READ_BYTE(cur_t + 2) << 8) : -(int32_t)(cur_t + 1); b[batch_len + t].len = blen; b[batch_len + t].offset = ofs; ofs += blen; // for correct out-of-range detection below } blen = WarpReducePos32(blen, t); bytes_left = SHFL0(bytes_left); dst_pos = SHFL0(dst_pos); short_sym_mask = __ffs(BALLOT(blen > bytes_left || ofs > (int32_t)(dst_pos + blen))); if (short_sym_mask != 0) { batch_add = min(batch_add, short_sym_mask - 1); } if (batch_add != 0) { blen = SHFL(blen, batch_add - 1); cur = SHFL(cur_t, batch_add - 1) + 2 + ((len3_mask >> ((batch_add - 1) * 2)) & 3); if (t == 0) { dst_pos += blen; bytes_left -= blen; } batch_len += batch_add; } } } while (batch_add >= 6 && batch_len < BATCH_SIZE - 2); } } if (t == 0) { while (bytes_left > 0 && batch_len < BATCH_SIZE) { uint32_t blen, offset; uint8_t b0 = READ_BYTE(cur); if (b0 & 3) { uint8_t b1 = READ_BYTE(cur + 1); if (!(b0 & 2)) { // xxxxxx01.oooooooo: copy with 3-bit length, 11-bit offset offset = ((b0 & 0xe0) << 3) | b1; blen = ((b0 >> 2) & 7) + 4; cur += 2; } else { // xxxxxx1x: copy with 6-bit length, 2-byte or 4-byte offset offset = b1 | (READ_BYTE(cur + 2) << 8); if (b0 & 1) // 4-byte offset { offset |= (READ_BYTE(cur + 3) << 16) | (READ_BYTE(cur + 4) << 24); cur += 5; } else { cur += 3; } blen = (b0 >> 2) + 1; } dst_pos += blen; if (offset - 1u >= dst_pos || bytes_left < blen) break; bytes_left -= blen; } else if (b0 < 4 * 4) { // 0000xx00: short literal blen = (b0 >> 2) + 1; offset = -(int32_t)(cur + 1); cur += 1 + blen; dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } else { // xxxxxx00: literal blen = b0 >> 2; if (blen >= 60) { uint32_t num_bytes = blen - 59; blen = READ_BYTE(cur + 1); if (num_bytes > 1) { blen |= READ_BYTE(cur + 2) << 8; if (num_bytes > 2) { blen |= READ_BYTE(cur + 3) << 16; if (num_bytes > 3) { blen |= READ_BYTE(cur + 4) << 24; } } } cur += num_bytes; } cur += 1; 
blen += 1; offset = -(int32_t)cur; cur += blen; // Wait for prefetcher s->q.prefetch_rdpos = cur; #pragma unroll(1) // We don't want unrolling here while (s->q.prefetch_wrpos < min(cur + 5 * BATCH_SIZE, end)) { NANOSLEEP(50); } dst_pos += blen; if (bytes_left < blen) break; bytes_left -= blen; } b[batch_len].len = blen; b[batch_len].offset = offset; batch_len++; } if (batch_len != 0) { s->q.batch_len[batch] = batch_len; batch = (batch + 1) & (BATCH_COUNT - 1); } } batch_len = SHFL0(batch_len); if (t == 0) { while (s->q.batch_len[batch] != 0) { NANOSLEEP(100); } } if (batch_len != BATCH_SIZE) { break; } } if (!t) { s->q.prefetch_end = 1; s->q.batch_len[batch] = -1; s->bytes_left = bytes_left; if (bytes_left != 0) { s->error = -2; } } } /** * @brief process LZ77 symbols and output uncompressed stream * * @param s decompression state * @param t thread id within participating group (lane id) * * NOTE: No error checks at this stage (WARP0 responsible for not sending offsets and lengths that *would result in out-of-bounds accesses) **/ __device__ void snappy_process_symbols(unsnap_state_s *s, int t) { const uint8_t *literal_base = s->base; uint8_t *out = reinterpret_cast<uint8_t *>(s->in.dstDevice); int batch = 0; do { volatile unsnap_batch_s *b = &s->q.batch[batch * BATCH_SIZE]; int32_t batch_len, blen_t, dist_t; if (t == 0) { while ((batch_len = s->q.batch_len[batch]) == 0) { NANOSLEEP(100); } } else { batch_len = 0; } batch_len = SHFL0(batch_len); if (batch_len <= 0) { break; } if (t < batch_len) { blen_t = b[t].len; dist_t = b[t].offset; } else { blen_t = dist_t = 0; } // Try to combine as many small entries as possible, but try to avoid doing that // if we see a small repeat distance 8 bytes or less if (SHFL0(min((uint32_t)dist_t, (uint32_t)SHFL_XOR(dist_t, 1))) > 8) { uint32_t n; do { uint32_t bofs = WarpReducePos32(blen_t, t); uint32_t stop_mask = BALLOT((uint32_t)dist_t < bofs); uint32_t start_mask = WarpReduceSum32((bofs < 32 && t < batch_len) ? 1 << bofs : 0); n = min(min((uint32_t)__popc(start_mask), (uint32_t)(__ffs(stop_mask) - 1u)), (uint32_t)batch_len); if (n != 0) { uint32_t it = __popc(start_mask & ((2 << t) - 1)); uint32_t tr = t - SHFL(bofs - blen_t, it); int32_t dist = SHFL(dist_t, it); if (it < n) { const uint8_t *src = (dist > 0) ? (out + t - dist) : (literal_base + tr - dist); out[t] = *src; } out += SHFL(bofs, n - 1); blen_t = SHFL(blen_t, (n + t) & 0x1f); dist_t = SHFL(dist_t, (n + t) & 0x1f); batch_len -= n; } } while (n >= 4); } for (int i = 0; i < batch_len; i++) { int32_t blen = SHFL(blen_t, i); int32_t dist = SHFL(dist_t, i); int32_t blen2 = (i + 1 < batch_len) ? SHFL(blen_t, i + 1) : 32; // Try to combine consecutive small entries if they are independent if ((uint32_t)dist >= (uint32_t)blen && blen + blen2 <= 32) { int32_t dist2 = SHFL(dist_t, i + 1); if ((uint32_t)dist2 >= (uint32_t)(blen + blen2)) { int32_t d; if (t < blen) { d = dist; } else { dist = dist2; d = (dist2 <= 0) ? dist2 + blen : dist2; } blen += blen2; if (t < blen) { const uint8_t *src = (dist > 0) ? (out - d) : (literal_base - d); out[t] = src[t]; } out += blen; i++; continue; } } if (dist > 0) { // Copy uint8_t b0, b1; if (t < blen) { uint32_t pos = t; const uint8_t *src = out + ((pos >= dist) ? (pos % dist) : pos) - dist; b0 = *src; } if (32 + t < blen) { uint32_t pos = 32 + t; const uint8_t *src = out + ((pos >= dist) ? 
(pos % dist) : pos) - dist; b1 = *src; } if (t < blen) { out[t] = b0; } if (32 + t < blen) { out[32 + t] = b1; } } else { // Literal uint8_t b[LITERAL_SECTORS]; dist = -dist; while (blen >= LITERAL_SECTORS * 32u) { for(int i = 0; i < LITERAL_SECTORS; ++i) b[i] = literal_base[dist + i * 32u + t]; for(int i = 0; i < LITERAL_SECTORS; ++i) out[i * 32u + t] = b[i]; dist += LITERAL_SECTORS * 32u; out += LITERAL_SECTORS * 32u; blen -= LITERAL_SECTORS * 32u; } for(int i = 0; i < LITERAL_SECTORS; ++i) if (i * 32u + t < blen) b[i] = literal_base[dist + i * 32u + t]; for(int i = 0; i < LITERAL_SECTORS; ++i) if (i * 32u + t < blen) out[i * 32u + t] = b[i]; } out += blen; } SYNCWARP(); if (t == 0) { s->q.batch_len[batch] = 0; } batch = (batch + 1) & (BATCH_COUNT - 1); } while (1); } /** * @brief Snappy decompression kernel * See http://github.com/google/snappy/blob/master/format_description.txt * * blockDim {128,1,1} * * @param[in] inputs Source & destination information per block * @param[out] outputs Decompression status per block **/ extern "C" __global__ void __launch_bounds__(96) unsnap_kernel( const void* const* __restrict__ device_in_ptr, const uint64_t* __restrict__ device_in_bytes, void* const* __restrict__ device_out_ptr, const uint64_t* __restrict__ device_out_available_bytes, gpu_snappy_status_s * __restrict__ outputs, uint64_t* __restrict__ device_out_bytes) { __shared__ __align__(16) unsnap_state_s state_g; int t = threadIdx.x; unsnap_state_s *s = &state_g; int strm_id = blockIdx.x; if (!t) { s->in.srcDevice = device_in_ptr[strm_id]; s->in.srcSize = device_in_bytes[strm_id]; s->in.dstDevice = device_out_ptr[strm_id]; s->in.dstSize = device_out_available_bytes ? device_out_available_bytes[strm_id] : 0; } if (t < BATCH_COUNT) { s->q.batch_len[t] = 0; } __syncthreads(); if (!t) { const uint8_t *cur = reinterpret_cast<const uint8_t *>(s->in.srcDevice); const uint8_t *end = cur + s->in.srcSize; s->error = 0; #if LOG_CYCLECOUNT s->tstart = clock(); #endif if (cur < end) { // Read uncompressed size (varint), limited to 32-bit uint32_t uncompressed_size = *cur++; if (uncompressed_size > 0x7f) { uint32_t c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & 0x7f) | (c << 7); if (uncompressed_size >= (0x80 << 7)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 7) | 0x7f)) | (c << 14); if (uncompressed_size >= (0x80 << 14)) { c = (cur < end) ? *cur++ : 0; uncompressed_size = (uncompressed_size & ((0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 21); if (uncompressed_size >= (0x80 << 21)) { c = (cur < end) ? 
*cur++ : 0; if (c < 0x8) uncompressed_size = (uncompressed_size & ((0x7f << 21) | (0x7f << 14) | (0x7f << 7) | 0x7f)) | (c << 28); else s->error = -1; } } } } s->uncompressed_size = uncompressed_size; s->bytes_left = uncompressed_size; s->base = cur; s->end = end; if (s->in.dstSize == 0) s->in.dstSize = uncompressed_size; if ((cur >= end && uncompressed_size != 0) || (uncompressed_size > s->in.dstSize)) { s->error = -1; } } else { s->error = -1; } s->q.prefetch_end = 0; s->q.prefetch_wrpos = 0; s->q.prefetch_rdpos = 0; } __syncthreads(); if (!s->error) { if (t < 32) { // WARP0: decode lengths and offsets snappy_decode_symbols(s, t); } else if (t < 64) { // WARP1: prefetch byte stream for WARP0 snappy_prefetch_bytestream(s, t & 0x1f); } else if (t < 96) { // WARP2: LZ77 snappy_process_symbols(s, t & 0x1f); } __syncthreads(); } if (!t) { if (device_out_bytes) device_out_bytes[strm_id] = s->uncompressed_size - s->bytes_left; if (outputs) outputs[strm_id].status = s->error; } } cudaError_t gpu_snap( const void* const* device_in_ptr, const size_t* device_in_bytes, void* const* device_out_ptr, const size_t* device_out_available_bytes, gpu_snappy_status_s *outputs, size_t* device_out_bytes, int count, cudaStream_t stream) { dim3 dim_block(64, 1); // 2 warps per stream, 1 stream per block dim3 dim_grid(count, 1); if (count > 0) { snap_kernel<<<dim_grid, dim_block, 0, stream>>>( device_in_ptr, device_in_bytes, device_out_ptr, device_out_available_bytes, outputs, device_out_bytes); } return cudaGetLastError(); } cudaError_t gpu_unsnap( const void* const* device_in_ptr, const size_t* device_in_bytes, void* const* device_out_ptr, const size_t* device_out_available_bytes, gpu_snappy_status_s *outputs, size_t* device_out_bytes, int count, cudaStream_t stream) { uint32_t count32 = (count > 0) ? count : 0; dim3 dim_block(96, 1); // 3 warps per stream, 1 stream per block dim3 dim_grid(count32, 1); // TODO: Check max grid dimensions vs max expected count unsnap_kernel<<<dim_grid, dim_block, 0, stream>>>( device_in_ptr, device_in_bytes, device_out_ptr, device_out_available_bytes, outputs, device_out_bytes); return cudaGetLastError(); } } // nvcomp namespace
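A note on the preamble handling above: thread 0 decodes the uncompressed length as a little-endian base-128 varint capped at 32 bits before the block splits into its decode/prefetch/LZ77 warps. A minimal host-side sketch of the same decode, with an illustrative helper name that is not part of the library:

#include <cstddef>
#include <cstdint>

// Decodes the Snappy preamble varint (uncompressed length, at most 32 bits).
// Returns the number of bytes consumed, or 0 on a truncated/overlong varint.
inline std::size_t decode_snappy_preamble(const uint8_t* cur, const uint8_t* end,
                                          uint32_t* out) {
  uint32_t size = 0;
  for (int i = 0, shift = 0; i < 5; ++i, shift += 7) {
    if (cur + i >= end) return 0;                 // truncated stream
    uint32_t c = cur[i];
    if (i == 4 && c > 0x0f) return 0;             // would not fit in 32 bits
    size |= (c & 0x7f) << shift;
    if (c < 0x80) { *out = size; return i + 1; }  // high bit clear: last byte
  }
  return 0;                                       // varint longer than 5 bytes
}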
8d62311e439b4c6cbb1e10d4346c8ee032ec47fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ausmPlus.h" #include <stdio.h> __global__ void diffusiveFlux(cell *domain,float *R, float *gammma, float *mu,float wall_temp,float *k) { int x=blockIdx.x; int y=threadIdx.x; int note=-10; int faces=(int)domain[x].face[y]-1; int ourFlag=(int)domain[x].flag; float delu_delx=0.0,delv_delx=0.0,delu_dely=0.0,delv_dely=0.0; if(ourFlag==0 || ourFlag==4 || ourFlag==2) { float x_cord[]={0,0},y_cord[]={0,0}; if(faces<0 || faces>50266) { note=y; } int i1,i2; if(ourFlag==4 && y==note) { i1=note; i2=(note+1)%3; x_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } for (int i = 0; i < 3; ++i) { if(ourFlag!=4 || (ourFlag==4 && y!=note)) { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); x_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); y_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][1]); } else { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); } } if(ourFlag==2 && y==note) { i1=note; i2=(note+1)%3; x_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } else if(ourFlag==2 && y!=note) { for (int i = 0; i < 3; ++i) { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); x_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); y_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][1]); } } if(abs(x_cord[1]-x_cord[0])<=0.001) { delu_delx=0.0; delv_delx=0.0; } else { delu_delx=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); delv_delx=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); } if(abs(y_cord[1]-y_cord[0])<=0.001) { delu_dely=0.0; delv_dely=0.0; } else { delu_dely=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); delv_dely=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); } float tau_xx=2*mu[0]*(delu_delx-1/3*(delu_delx+delv_dely)); float tau_yy=2*mu[0]*(delv_dely-1/3*(delu_delx+delv_dely)); float tau_xy=mu[0]*(delu_dely+delv_delx); float temp[2]; temp[0]=(gammma[0]-1)/R[0]*(domain[x].stateVar[3]-0.5*(powf(domain[x].stateVar[1],2)+powf(domain[x].stateVar[2],2))/domain[x].stateVar[0])/domain[x].stateVar[0]; if(ourFlag!=4 || (ourFlag==4 && y!=note)) temp[1]=(gammma[0]-1)/R[0]*(domain[x].temp_var[y][3]-0.5*(powf(domain[x].temp_var[y][1],2)\ +powf(domain[x].temp_var[y][2],2))/domain[x].temp_var[y][0])/domain[x].temp_var[y][0]; else { temp[1]=wall_temp; } float delT_delx,delT_dely; if(abs(x_cord[1]-x_cord[0])<=0.001) delT_delx=0; else delT_delx=(temp[1]-temp[0])/(x_cord[1]-x_cord[0]); if(abs(y_cord[1]-y_cord[0])<=0.001) delT_dely=0; else delT_dely=(temp[1]-temp[0])/(y_cord[1]-y_cord[0]); float thetaX=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xx+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_xy+k[0]*delT_delx; float thetaY=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xy+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_yy+k[0]*delT_dely; domain[x].diffflux[y][0]=0; 
domain[x].diffflux[y][1]=(tau_xx*domain[x].norms[y][0]+tau_xy*domain[x].norms[y][1])\ *sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); domain[x].diffflux[y][2]=(tau_xy*domain[x].norms[y][0]+tau_yy*domain[x].norms[y][1])\ *sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); domain[x].diffflux[y][3]=(thetaX*domain[x].norms[y][0]+thetaY*domain[x].norms[y][1])\ *sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); /*if(abs((1.0/3.0)*(domain[x].nodes[0][0]+domain[x].nodes[1][0]+domain[x].nodes[2][0])+0.524158619)<0.001 && abs((1.0/3.0)*(domain[x].nodes[0][1]+domain[x].nodes[1][1]+domain[x].nodes[2][1])-0.8526501336)<0.001) { printf("diffusive %5.14lf %5.14lf %5.14lf %5.14lf %d %d %d\n",domain[x].diffflux[y][0],domain[x].diffflux[y][1],domain[x].diffflux[y][2],domain[x].diffflux[y][3],domain[x].flag,x+1,y); printf("%5.14lf %5.14lf %5.14lf %5.14lf\n",x_cord[0],x_cord[1],y_cord[0],y_cord[1]); }*/ } }
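The stress assembly in diffusiveFlux follows the 2-D compressible Newtonian model, but the 1/3 factor in the tau_xx/tau_yy expressions is integer division and evaluates to 0, so the divergence contribution vanishes as written. A small sketch of that computation with the ratio spelled as a floating-point constant (illustrative helper, not part of this file):

// Deviatoric viscous stresses from the one-sided velocity gradients.
__host__ __device__ inline void viscous_stress_2d(float mu,
                                                  float du_dx, float du_dy,
                                                  float dv_dx, float dv_dy,
                                                  float* tau_xx, float* tau_yy,
                                                  float* tau_xy) {
  const float div_v = du_dx + dv_dy;                       // velocity divergence
  *tau_xx = 2.0f * mu * (du_dx - (1.0f / 3.0f) * div_v);   // 1.0f/3.0f, not 1/3
  *tau_yy = 2.0f * mu * (dv_dy - (1.0f / 3.0f) * div_v);
  *tau_xy = mu * (du_dy + dv_dx);
}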
8d62311e439b4c6cbb1e10d4346c8ee032ec47fa.cu
#include "ausmPlus.h" #include <stdio.h> __global__ void diffusiveFlux(cell *domain,float *R, float *gammma, float *mu,float wall_temp,float *k) { int x=blockIdx.x; int y=threadIdx.x; int note=-10; int faces=(int)domain[x].face[y]-1; int ourFlag=(int)domain[x].flag; float delu_delx=0.0,delv_delx=0.0,delu_dely=0.0,delv_dely=0.0; if(ourFlag==0 || ourFlag==4 || ourFlag==2) { float x_cord[]={0,0},y_cord[]={0,0}; if(faces<0 || faces>50266) { note=y; } int i1,i2; if(ourFlag==4 && y==note) { i1=note; i2=(note+1)%3; x_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } for (int i = 0; i < 3; ++i) { if(ourFlag!=4 || (ourFlag==4 && y!=note)) { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); x_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); y_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][1]); } else { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); } } if(ourFlag==2 && y==note) { i1=note; i2=(note+1)%3; x_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][0]+domain[x].nodes[i2][0]); y_cord[1]=(1.0/2.0)*(domain[x].nodes[i1][1]+domain[x].nodes[i2][1]); } else if(ourFlag==2 && y!=note) { for (int i = 0; i < 3; ++i) { //x_cordinate of the elements x_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][0]); x_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][0]); //Y coordinate of the elements y_cord[0]+=(1.0/3.0)*(domain[x].nodes[i][1]); y_cord[1]+=(1.0/3.0)*(domain[faces].nodes[i][1]); } } if(abs(x_cord[1]-x_cord[0])<=0.001) { delu_delx=0.0; delv_delx=0.0; } else { delu_delx=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); delv_delx=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(x_cord[1]-x_cord[0]); } if(abs(y_cord[1]-y_cord[0])<=0.001) { delu_dely=0.0; delv_dely=0.0; } else { delu_dely=(domain[x].temp_var[y][1]/domain[x].temp_var[y][0]-domain[x].stateVar[1]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); delv_dely=(domain[x].temp_var[y][2]/domain[x].temp_var[y][0]-domain[x].stateVar[2]/domain[x].stateVar[0])/(y_cord[1]-y_cord[0]); } float tau_xx=2*mu[0]*(delu_delx-1/3*(delu_delx+delv_dely)); float tau_yy=2*mu[0]*(delv_dely-1/3*(delu_delx+delv_dely)); float tau_xy=mu[0]*(delu_dely+delv_delx); float temp[2]; temp[0]=(gammma[0]-1)/R[0]*(domain[x].stateVar[3]-0.5*(powf(domain[x].stateVar[1],2)+powf(domain[x].stateVar[2],2))/domain[x].stateVar[0])/domain[x].stateVar[0]; if(ourFlag!=4 || (ourFlag==4 && y!=note)) temp[1]=(gammma[0]-1)/R[0]*(domain[x].temp_var[y][3]-0.5*(powf(domain[x].temp_var[y][1],2)\ +powf(domain[x].temp_var[y][2],2))/domain[x].temp_var[y][0])/domain[x].temp_var[y][0]; else { temp[1]=wall_temp; } float delT_delx,delT_dely; if(abs(x_cord[1]-x_cord[0])<=0.001) delT_delx=0; else delT_delx=(temp[1]-temp[0])/(x_cord[1]-x_cord[0]); if(abs(y_cord[1]-y_cord[0])<=0.001) delT_dely=0; else delT_dely=(temp[1]-temp[0])/(y_cord[1]-y_cord[0]); float thetaX=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xx+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_xy+k[0]*delT_delx; float thetaY=domain[x].stateVar[1]/domain[x].stateVar[0]*tau_xy+domain[x].stateVar[2]/domain[x].stateVar[0]*tau_yy+k[0]*delT_dely; domain[x].diffflux[y][0]=0; domain[x].diffflux[y][1]=(tau_xx*domain[x].norms[y][0]+tau_xy*domain[x].norms[y][1])\ 
*sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); domain[x].diffflux[y][2]=(tau_xy*domain[x].norms[y][0]+tau_yy*domain[x].norms[y][1])\ *sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); domain[x].diffflux[y][3]=(thetaX*domain[x].norms[y][0]+thetaY*domain[x].norms[y][1])\ *sqrt(powf(domain[x].nodes[y][0]-domain[x].nodes[(y+1)%3][0],2)+powf(domain[x].nodes[y][1]-domain[x].nodes[(y+1)%3][1],2)); /*if(abs((1.0/3.0)*(domain[x].nodes[0][0]+domain[x].nodes[1][0]+domain[x].nodes[2][0])+0.524158619)<0.001 && abs((1.0/3.0)*(domain[x].nodes[0][1]+domain[x].nodes[1][1]+domain[x].nodes[2][1])-0.8526501336)<0.001) { printf("diffusive %5.14lf %5.14lf %5.14lf %5.14lf %d %d %d\n",domain[x].diffflux[y][0],domain[x].diffflux[y][1],domain[x].diffflux[y][2],domain[x].diffflux[y][3],domain[x].flag,x+1,y); printf("%5.14lf %5.14lf %5.14lf %5.14lf\n",x_cord[0],x_cord[1],y_cord[0],y_cord[1]); }*/ } }
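The kernel reads the cell index from blockIdx.x and the edge index from threadIdx.x (always used modulo 3), so the implied launch is one block per triangular cell with three threads. A hypothetical launch sketch; the device pointer names and numCells are assumptions, and the hard-coded faces>50266 guard only suggests a mesh of roughly that many cells:

dim3 grid(numCells);   // one block per cell (numCells is an assumption)
dim3 block(3);         // one thread per triangle edge
diffusiveFlux<<<grid, block>>>(d_domain, d_R, d_gammma, d_mu, wall_temp, d_k);
cudaDeviceSynchronize();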
3eb7c4d535413c85c1a95afd73097ce1be83f801.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "PGM.h" /* This function basicaly does round process with a threshold 0.5 input : a float number output : a rounded integer */ // __device__ int Round(float num) { return int(num)+0.5 < num ? int(num) + 1: int(num) + 0.5; } /* This function finds cumulative distribution function of given pointer array input X: is 256 length pointer array output Y: is cumulative version of X array */ __global__ void CDF(unsigned int *X, unsigned int *Y) { __shared__ unsigned int XY[256]; // 256 bin size histogram array int n = 256; // histogram of 256 bin int idx = blockIdx.x * blockDim.x + threadIdx.x; // initialize XY array if (idx < n) { XY[idx] = X[idx]; } // reduction int index; for (int stride = 1; stride < n; stride *= 2){ __syncthreads(); index = 2 * (idx + 1) * stride - 1; if (index < 256) { XY[index] += XY[index - stride]; } } // post reduction for (int stride = n/4; stride > 0; stride /= 2){ __syncthreads(); index = 2 * (idx + 1) * stride - 1; if(index + stride < n) { XY[index + stride] += XY[index]; } } // writing result to output matrix if (idx < n) Y[idx] = XY[idx]; } /* This function calculate the histogram of given array X is input pointer array histo is histogram of X array n is size of X array */ __global__ void histogram(unsigned int *X, unsigned int *histo, long int n){ // shared histogram array for each thread //The private histogram size needs to be small, to fit into shared memory __shared__ unsigned int private_histo[256]; // initialize histogram arrays if (threadIdx.x < 256) private_histo[threadIdx.x] = 0; __syncthreads(); // each thread add 1 histogram calculation with atomic add int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n){ atomicAdd(&private_histo[X[idx]], 1); } // barrier __syncthreads(); // merge all shared array into output array (local memory) if (threadIdx.x < 256) { atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]); } } /* This kernel makes equalization of given histogram histo is input histogram array histo_out is output histogram array dcdfhisto is device cumulative histogram array cdfmin is minimun nonzero value of dcdf array n is size of image */ __global__ void histogram_equalization(unsigned int *histo, unsigned int *histo_out, unsigned int *d_cdf_histo, int cdfmin, long int img_size){ int idx = threadIdx.x + blockIdx.x * blockDim.x; // Equalization old histogram values with new ones // 255 is maximun number in the gray scale if(idx < 256) histo_out[idx] = Round(((float)d_cdf_histo[idx] - cdfmin) / (img_size - cdfmin) * 255); } /* This kernel makes equalization of given image over histogram img_in is input array img_out is output array histo is equalized histogram arrayy n is size of image */ __global__ void image_equalization(unsigned int *img_in, unsigned int *img_out, unsigned int *histo, int n){ /* Get the result image */ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < n){ if(histo[img_in[idx]] > 255){ img_out[idx] = 255; } else{ img_out[idx] = (unsigned int)histo[img_in[idx]]; } } } int main() { PGM_IMG img_in; PGM_IMG img_out; //Enter file name for the both char array for ease of review const char filename[] = "marcie.pgm"; const char filename_out[] = "marcie_out.pgm"; printf("%s is loading\n", filename); //==================================================================== // DATA I/O and CPU Allocations. 
//==================================================================== /* Reading image and neccessary details */ img_in = read_pgm(filename); /* Preaparing output image details*/ img_out.w = img_in.w; img_out.h = img_in.h; long int img_size = img_in.w * img_in.h; // 1D lenght of input image size_t size = img_size * sizeof(unsigned int); // neccessary size for input image size_t size_hist = 256 * sizeof(unsigned int); // neccessary size for histogram arrays // d means devices unsigned int *cdf_histo, *dev_img_in, *d_histo, *d_histo_out, *d_cdf_histo, *dev_img_out; // Memory Allocations img_out.img = (unsigned int *)malloc(size); cdf_histo = (unsigned int *)malloc(size_hist); //==================================================================== // GPU //==================================================================== printf("GPU processes started\n"); /* Declarations of GPU Eximination time counters */ hipEvent_t start1, stop1; hipEvent_t start2, stop2; hipEvent_t start3, stop3; hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&stop1); hipEventCreate(&stop2); hipEventCreate(&stop3); /* Making necessary allocations */ hipMalloc((void **) &dev_img_in, size); hipMalloc((void **) &dev_img_out, size); hipMalloc((void **) &d_histo, size_hist); hipMalloc((void **) &d_histo_out, size_hist); hipMalloc((void **) &d_cdf_histo, size_hist); hipMemcpy(dev_img_in, img_in.img, size, hipMemcpyHostToDevice); //==================================================================== int blockSize; // The launch configurator returned block size int gridSize; // The actual grid size needed, based on input size // Round up according to array size blockSize = 1024; gridSize = (img_size + blockSize - 1) / blockSize; printf("Grid and Block number for histogram [%d,%d]\n", gridSize, blockSize); hipEventRecord(start1); hipLaunchKernelGGL(( histogram), dim3(gridSize), dim3(blockSize), size_hist, 0, dev_img_in, d_histo, img_size); hipEventRecord(stop1); // private_histo_kernel //==================================================================== hipDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 256; gridSize = (256 + blockSize - 1) / blockSize; printf("Grid and Block number for CDF kernel [%d,%d]\n", gridSize, blockSize); hipEventRecord(start2); hipLaunchKernelGGL(( CDF), dim3(gridSize), dim3(blockSize), size_hist, 0, d_histo, d_cdf_histo); hipEventRecord(stop2); //==================================================================== hipDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 256; gridSize = (256 + blockSize - 1) / blockSize; printf("Grid and Block number for histogram_equalization [%d,%d]\n" , gridSize, blockSize); hipEventRecord(start3); hipMemcpy(cdf_histo, d_cdf_histo, size_hist, hipMemcpyDeviceToHost); // if this process can be done in gpu time of process will be decreased /* While loop for finding first nonzero element of CDF */ int cdfmin = 0, i = 0; while(cdfmin == 0){ cdfmin = cdf_histo[i++]; } hipLaunchKernelGGL(( histogram_equalization), dim3(gridSize), dim3(blockSize), 0, 0, d_histo, d_histo_out, d_cdf_histo, cdfmin, img_size); hipEventRecord(stop3); //==================================================================== hipDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 1024; gridSize = 
(img_size + blockSize - 1) / blockSize; printf("Grid and Block number for image_equalization kernel [%d,%d]\n" , gridSize, blockSize); hipLaunchKernelGGL(( image_equalization), dim3(gridSize), dim3(blockSize), 0, 0, dev_img_in, dev_img_out, d_histo_out, img_size); //==================================================================== hipMemcpy(img_out.img, dev_img_out, size, hipMemcpyDeviceToHost); //==================================================================== // Timings //==================================================================== hipEventSynchronize(stop1); hipEventSynchronize(stop2); hipEventSynchronize(stop3); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start1, stop1); printf("GPU Execution time of histogram kernel is %.2f milliseconds\n", milliseconds); milliseconds = 0; hipEventElapsedTime(&milliseconds, start2, stop2); printf("GPU Execution time of CDF kernel is %.2f milliseconds\n", milliseconds); milliseconds = 0; hipEventElapsedTime(&milliseconds, start3, stop3); printf("GPU Execution time of histogram_equalization kernel is %.2f milliseconds\n", milliseconds); //==================================================================== // DATA Output and Memory Deallocations //==================================================================== write_pgm(img_out, filename_out); free(cdf_histo); free_pgm(img_in); free_pgm(img_out); hipFree(dev_img_in); hipFree(dev_img_out); hipFree(d_histo); hipFree(d_histo_out); hipFree(d_cdf_histo); return 0; }
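A host reference for the pipeline above is useful when checking the device output: build the 256-bin histogram, take its inclusive prefix sum, and remap each pixel with the same (cdf - cdf_min)/(N - cdf_min)*255 formula used by histogram_equalization. A hedged sketch (names are illustrative; rounding can differ by one count from the device Round helper):

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<unsigned int> equalize_host(const std::vector<unsigned int>& img) {
  unsigned long hist[256] = {0};
  for (unsigned int v : img) hist[v & 0xff]++;               // 8-bit grayscale bins
  unsigned long cdf[256];
  unsigned long running = 0;
  for (int i = 0; i < 256; ++i) { running += hist[i]; cdf[i] = running; }
  unsigned long cdf_min = 0;
  for (int i = 0; i < 256 && cdf_min == 0; ++i) cdf_min = cdf[i];   // first nonzero bin
  const double denom = static_cast<double>(img.size()) - static_cast<double>(cdf_min);
  std::vector<unsigned int> out(img.size());
  for (std::size_t p = 0; p < img.size(); ++p) {
    double v = denom > 0.0
        ? (static_cast<double>(cdf[img[p] & 0xff]) - cdf_min) / denom * 255.0
        : 255.0;                                             // flat-image edge case
    out[p] = static_cast<unsigned int>(std::lround(std::max(0.0, v)));
  }
  return out;
}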
3eb7c4d535413c85c1a95afd73097ce1be83f801.cu
#include <stdio.h> #include <string.h> #include <cuda.h> #include <cuda_runtime.h> #include "PGM.h" /* This function basicaly does round process with a threshold 0.5 input : a float number output : a rounded integer */ // __device__ int Round(float num) { return int(num)+0.5 < num ? int(num) + 1: int(num) + 0.5; } /* This function finds cumulative distribution function of given pointer array input X: is 256 length pointer array output Y: is cumulative version of X array */ __global__ void CDF(unsigned int *X, unsigned int *Y) { __shared__ unsigned int XY[256]; // 256 bin size histogram array int n = 256; // histogram of 256 bin int idx = blockIdx.x * blockDim.x + threadIdx.x; // initialize XY array if (idx < n) { XY[idx] = X[idx]; } // reduction int index; for (int stride = 1; stride < n; stride *= 2){ __syncthreads(); index = 2 * (idx + 1) * stride - 1; if (index < 256) { XY[index] += XY[index - stride]; } } // post reduction for (int stride = n/4; stride > 0; stride /= 2){ __syncthreads(); index = 2 * (idx + 1) * stride - 1; if(index + stride < n) { XY[index + stride] += XY[index]; } } // writing result to output matrix if (idx < n) Y[idx] = XY[idx]; } /* This function calculate the histogram of given array X is input pointer array histo is histogram of X array n is size of X array */ __global__ void histogram(unsigned int *X, unsigned int *histo, long int n){ // shared histogram array for each thread //The private histogram size needs to be small, to fit into shared memory __shared__ unsigned int private_histo[256]; // initialize histogram arrays if (threadIdx.x < 256) private_histo[threadIdx.x] = 0; __syncthreads(); // each thread add 1 histogram calculation with atomic add int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < n){ atomicAdd(&private_histo[X[idx]], 1); } // barrier __syncthreads(); // merge all shared array into output array (local memory) if (threadIdx.x < 256) { atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]); } } /* This kernel makes equalization of given histogram histo is input histogram array histo_out is output histogram array dcdfhisto is device cumulative histogram array cdfmin is minimun nonzero value of dcdf array n is size of image */ __global__ void histogram_equalization(unsigned int *histo, unsigned int *histo_out, unsigned int *d_cdf_histo, int cdfmin, long int img_size){ int idx = threadIdx.x + blockIdx.x * blockDim.x; // Equalization old histogram values with new ones // 255 is maximun number in the gray scale if(idx < 256) histo_out[idx] = Round(((float)d_cdf_histo[idx] - cdfmin) / (img_size - cdfmin) * 255); } /* This kernel makes equalization of given image over histogram img_in is input array img_out is output array histo is equalized histogram arrayy n is size of image */ __global__ void image_equalization(unsigned int *img_in, unsigned int *img_out, unsigned int *histo, int n){ /* Get the result image */ int idx = threadIdx.x + blockIdx.x * blockDim.x; if(idx < n){ if(histo[img_in[idx]] > 255){ img_out[idx] = 255; } else{ img_out[idx] = (unsigned int)histo[img_in[idx]]; } } } int main() { PGM_IMG img_in; PGM_IMG img_out; //Enter file name for the both char array for ease of review const char filename[] = "marcie.pgm"; const char filename_out[] = "marcie_out.pgm"; printf("%s is loading\n", filename); //==================================================================== // DATA I/O and CPU Allocations. 
//==================================================================== /* Reading image and neccessary details */ img_in = read_pgm(filename); /* Preaparing output image details*/ img_out.w = img_in.w; img_out.h = img_in.h; long int img_size = img_in.w * img_in.h; // 1D lenght of input image size_t size = img_size * sizeof(unsigned int); // neccessary size for input image size_t size_hist = 256 * sizeof(unsigned int); // neccessary size for histogram arrays // d means devices unsigned int *cdf_histo, *dev_img_in, *d_histo, *d_histo_out, *d_cdf_histo, *dev_img_out; // Memory Allocations img_out.img = (unsigned int *)malloc(size); cdf_histo = (unsigned int *)malloc(size_hist); //==================================================================== // GPU //==================================================================== printf("GPU processes started\n"); /* Declarations of GPU Eximination time counters */ cudaEvent_t start1, stop1; cudaEvent_t start2, stop2; cudaEvent_t start3, stop3; cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&stop1); cudaEventCreate(&stop2); cudaEventCreate(&stop3); /* Making necessary allocations */ cudaMalloc((void **) &dev_img_in, size); cudaMalloc((void **) &dev_img_out, size); cudaMalloc((void **) &d_histo, size_hist); cudaMalloc((void **) &d_histo_out, size_hist); cudaMalloc((void **) &d_cdf_histo, size_hist); cudaMemcpy(dev_img_in, img_in.img, size, cudaMemcpyHostToDevice); //==================================================================== int blockSize; // The launch configurator returned block size int gridSize; // The actual grid size needed, based on input size // Round up according to array size blockSize = 1024; gridSize = (img_size + blockSize - 1) / blockSize; printf("Grid and Block number for histogram [%d,%d]\n", gridSize, blockSize); cudaEventRecord(start1); histogram<<<gridSize, blockSize, size_hist>>>(dev_img_in, d_histo, img_size); cudaEventRecord(stop1); // private_histo_kernel //==================================================================== cudaDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 256; gridSize = (256 + blockSize - 1) / blockSize; printf("Grid and Block number for CDF kernel [%d,%d]\n", gridSize, blockSize); cudaEventRecord(start2); CDF<<<gridSize, blockSize, size_hist>>>(d_histo, d_cdf_histo); cudaEventRecord(stop2); //==================================================================== cudaDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 256; gridSize = (256 + blockSize - 1) / blockSize; printf("Grid and Block number for histogram_equalization [%d,%d]\n" , gridSize, blockSize); cudaEventRecord(start3); cudaMemcpy(cdf_histo, d_cdf_histo, size_hist, cudaMemcpyDeviceToHost); // if this process can be done in gpu time of process will be decreased /* While loop for finding first nonzero element of CDF */ int cdfmin = 0, i = 0; while(cdfmin == 0){ cdfmin = cdf_histo[i++]; } histogram_equalization<<<gridSize, blockSize>>>(d_histo, d_histo_out, d_cdf_histo, cdfmin, img_size); cudaEventRecord(stop3); //==================================================================== cudaDeviceSynchronize(); //==================================================================== // Round up according to array size blockSize = 1024; gridSize = (img_size + blockSize - 1) / blockSize; printf("Grid and Block number for 
image_equalization kernel [%d,%d]\n" , gridSize, blockSize); image_equalization<<<gridSize, blockSize>>>(dev_img_in, dev_img_out, d_histo_out, img_size); //==================================================================== cudaMemcpy(img_out.img, dev_img_out, size, cudaMemcpyDeviceToHost); //==================================================================== // Timings //==================================================================== cudaEventSynchronize(stop1); cudaEventSynchronize(stop2); cudaEventSynchronize(stop3); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start1, stop1); printf("GPU Execution time of histogram kernel is %.2f milliseconds\n", milliseconds); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start2, stop2); printf("GPU Execution time of CDF kernel is %.2f milliseconds\n", milliseconds); milliseconds = 0; cudaEventElapsedTime(&milliseconds, start3, stop3); printf("GPU Execution time of histogram_equalization kernel is %.2f milliseconds\n", milliseconds); //==================================================================== // DATA Output and Memory Deallocations //==================================================================== write_pgm(img_out, filename_out); free(cdf_histo); free_pgm(img_in); free_pgm(img_out); cudaFree(dev_img_in); cudaFree(dev_img_out); cudaFree(d_histo); cudaFree(d_histo_out); cudaFree(d_cdf_histo); return 0; }
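The CDF kernel is a single-block Brent-Kung scan: the "reduction" loop is the up-sweep that accumulates partial sums at stride-aligned indices, and the "post reduction" loop is the down-sweep that propagates them, leaving an inclusive prefix sum in place. On an 8-element example the shared array evolves as follows (only the touched indices change at each step):

input                : 3  1  4  1  5  9  2  6
up-sweep, stride 1   : 3  4  4  5  5 14  2  8    (indices 1, 3, 5, 7 updated)
up-sweep, stride 2   : 3  4  4  9  5 14  2 22    (indices 3, 7)
up-sweep, stride 4   : 3  4  4  9  5 14  2 31    (index 7)
down-sweep, stride 2 : 3  4  4  9  5 23  2 31    (index 5)
down-sweep, stride 1 : 3  4  8  9 14 23 25 31    (indices 2, 4, 6; inclusive prefix sums)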
6316c1e8f3ecf4e80fdc551ac3302eaebe042793.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <math.h> #include <string.h> #include <fstream> #include <sstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 3 #define H 227 #define W 227 #define R 11 #define S 11 #define M 96 #define E 55 #define F 55 #define U 4 __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) { int row = threadIdx.y; int col = threadIdx.x; { for(int x=0;x<2;x++){ for(int y=0;y<2;y++){ for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ for(int k=0; k<num_ch; k++){ // float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]; // float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)]; // float prod = ip*wt; if((2*row+y<height)&&(2*col+x<width)) d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)] +=d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]*d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)] ; } } } //if(d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)]<0) //d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)] =0; } } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod >=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float* IP; //float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); hipMallocManaged(&IP,batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float* OPG; //float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); hipMallocManaged(&OPG,batch_size*M*F*E*sizeof(float)); float* WT; //float *WT = (float*) malloc(M*C*R*S*sizeof(float)); hipMallocManaged(&WT,M*R*C*S*sizeof(float)); //float WT[R][S]; //float* d_o; //float* d_i; //float* d_w; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); //if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) //IP[n*C*H*W+k*H*W+c*W+d] = 0; //else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); // 
IP[n*C*H*W+k*H*W+c*W+d] = (c+d); } } } } //hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)); //hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice); //hipMalloc((void**) &d_w, M*C*R*S*sizeof(float)); //hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice); //hipMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float)); clock_t start, end; start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); end = clock(); dim3 dimGrid(batch_size,96,1); dim3 dimBlock(28,28,1); //gpu_start = clock();hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, OPG,IP,WT,55,55,4,227,11,96,batch_size,3); hipDeviceSynchronize(); //gpu_end = clock(); //hipMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost); /***NAIVE 7 LAYER LOOP IMPLEMENTATION***/ /* int n,m,x,y,i,j,k; for (x=0; x<F; x++){ for(y=0; y<E; y++){ OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; } } } } */ /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error=0; string filename = "layer_1_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("inside loop\n"); // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error %f\n", max_error); //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //hipFree(d_o); //hipFree(d_i); //hipFree(d_w); hipFree(OPG); hipFree(IP); hipFree(WT); hipFree(OP); return 0; }
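The hard-coded 55x55 output plane and the 28x28 block in the launch are tied together by the valid-convolution size formula and the per-thread 2x2 output tile; the 2*row+y<height guard masks the last, partially covered tile row and column. A small compile-time sketch of that arithmetic (the constant names are illustrative, not from the file):

// E = F = (H - R) / U + 1 = (227 - 11) / 4 + 1 = 55 outputs per dimension.
// Each thread writes a 2x2 tile, so a 28x28 block covers 56x56 >= 55x55.
constexpr int H_IN = 227, R_K = 11, U_STRIDE = 4;
constexpr int E_OUT = (H_IN - R_K) / U_STRIDE + 1;
static_assert(E_OUT == 55, "output plane is 55x55");
static_assert(2 * 28 >= E_OUT, "a 28x28 block of 2x2 tiles covers the plane");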
6316c1e8f3ecf4e80fdc551ac3302eaebe042793.cu
#include <stdio.h> #include <iostream> #include <math.h> #include <string.h> #include <fstream> #include <sstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 3 #define H 227 #define W 227 #define R 11 #define S 11 #define M 96 #define E 55 #define F 55 #define U 4 __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) { int row = threadIdx.y; int col = threadIdx.x; { for(int x=0;x<2;x++){ for(int y=0;y<2;y++){ for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ for(int k=0; k<num_ch; k++){ // float ip = d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]; // float wt = d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)]; // float prod = ip*wt; if((2*row+y<height)&&(2*col+x<width)) d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)] +=d_i[blockIdx.x*num_ch*ip_height*ip_height+k*ip_height*ip_height+(stride*(2*row+y)+i)*ip_height+(stride*(2*col+x)+j)]*d_w[blockIdx.y*num_ch*wt_width*wt_width+k*wt_width*wt_width+(i*wt_width+j)] ; } } } //if(d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)]<0) //d_o[blockIdx.x*num_wt*height*width+blockIdx.y*width*height+(2*row+y)*width+(2*col+x)] =0; } } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod >=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float* IP; //float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); cudaMallocManaged(&IP,batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float* OPG; //float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); cudaMallocManaged(&OPG,batch_size*M*F*E*sizeof(float)); float* WT; //float *WT = (float*) malloc(M*C*R*S*sizeof(float)); cudaMallocManaged(&WT,M*R*C*S*sizeof(float)); //float WT[R][S]; //float* d_o; //float* d_i; //float* d_w; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); //if ((c<=1) || (d<=1) || (c>=29) || (d>=29)) //IP[n*C*H*W+k*H*W+c*W+d] = 0; //else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); // IP[n*C*H*W+k*H*W+c*W+d] = (c+d); } } } } //cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float)); 
//cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice); //cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float)); //cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice); //cudaMalloc((void**) &d_o, batch_size*M*E*F*sizeof(float)); clock_t start, end; start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); end = clock(); dim3 dimGrid(batch_size,96,1); dim3 dimBlock(28,28,1); //gpu_start = clock(); ew_gpu_mmul<<<dimGrid, dimBlock>>>(OPG,IP,WT,55,55,4,227,11,96,batch_size,3); cudaDeviceSynchronize(); //gpu_end = clock(); //cudaMemcpy(OPG,d_o, batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost); /***NAIVE 7 LAYER LOOP IMPLEMENTATION***/ /* int n,m,x,y,i,j,k; for (x=0; x<F; x++){ for(y=0; y<E; y++){ OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; } } } } */ /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error=0; string filename = "layer_1_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("inside loop\n"); // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index %d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error %f\n", max_error); //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cudaFree(d_o); //cudaFree(d_i); //cudaFree(d_w); cudaFree(OPG); cudaFree(IP); cudaFree(WT); cudaFree(OP); return 0; }
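Error handling in this program is limited to checking cudaGetLastError once after the synchronize; a common pattern is to wrap each runtime call and launch in a small checking macro so the failing call and source line are reported. A generic sketch, not part of this program:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with the error string and source location if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                  \
  do {                                                                    \
    cudaError_t err_ = (call);                                            \
    if (err_ != cudaSuccess) {                                            \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                   cudaGetErrorString(err_), __FILE__, __LINE__);         \
      std::exit(EXIT_FAILURE);                                            \
    }                                                                     \
  } while (0)

// Typical use after a launch, catching both launch and asynchronous errors:
//   ew_gpu_mmul<<<dimGrid, dimBlock>>>(...);
//   CUDA_CHECK(cudaGetLastError());
//   CUDA_CHECK(cudaDeviceSynchronize());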
e55eed5f80452bdaa4d59ecbdd3c9783cf474720.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void fill_vec(Span<T> output, T value) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; for (int j = 0; j < vector_type::size(); j++) vec.data[j] = value; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void copy_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto input_vPtr = vector_type::get_pointer(input.data()); auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> static void launch_vectorized_fill(const Stream& stream, Span<T> output, T value) { CV_Assert(is_fully_aligned<T>(output, N)); auto kernel = raw::fill_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, value); } template <class T> void fill(const Stream& stream, Span<T> output, T value) { if (is_fully_aligned<T>(output, 4)) { launch_vectorized_fill<T, 4>(stream, output, value); } else if (is_fully_aligned<T>(output, 2)) { launch_vectorized_fill<T, 2>(stream, output, value); } else { launch_vectorized_fill<T, 1>(stream, output, value); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void fill(const Stream&, Span<__half>, __half); #endif template void fill(const Stream&, Span<float>, float); template void fill(const Stream&, Span<int>, int); template <class T, std::size_t N> static void launch_vectorized_copy(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::copy_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void copy(const Stream& stream, Span<T> output, View<T> input) { if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_copy<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_copy<T, 2>(stream, output, input); } else { launch_vectorized_copy<T, 1>(stream, output, input); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void copy(const Stream&, Span<__half>, View<__half>); #endif template void copy(const Stream&, Span<float>, View<float>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
e55eed5f80452bdaa4d59ecbdd3c9783cf474720.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "grid_stride_range.hpp" #include "execution.hpp" #include "vector_traits.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void fill_vec(Span<T> output, T value) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; for (int j = 0; j < vector_type::size(); j++) vec.data[j] = value; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void copy_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto input_vPtr = vector_type::get_pointer(input.data()); auto output_vPtr = vector_type::get_pointer(output.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> static void launch_vectorized_fill(const Stream& stream, Span<T> output, T value) { CV_Assert(is_fully_aligned<T>(output, N)); auto kernel = raw::fill_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, value); } template <class T> void fill(const Stream& stream, Span<T> output, T value) { if (is_fully_aligned<T>(output, 4)) { launch_vectorized_fill<T, 4>(stream, output, value); } else if (is_fully_aligned<T>(output, 2)) { launch_vectorized_fill<T, 2>(stream, output, value); } else { launch_vectorized_fill<T, 1>(stream, output, value); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void fill(const Stream&, Span<__half>, __half); #endif template void fill(const Stream&, Span<float>, float); template void fill(const Stream&, Span<int>, int); template <class T, std::size_t N> static void launch_vectorized_copy(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::copy_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void copy(const Stream& stream, Span<T> output, View<T> input) { if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_copy<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_copy<T, 2>(stream, output, input); } else { launch_vectorized_copy<T, 1>(stream, output, input); } } #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 530) template void copy(const Stream&, Span<__half>, View<__half>); #endif template void copy(const Stream&, Span<float>, View<float>); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
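The fill/copy dispatchers above select the widest vector width (4, then 2, then scalar) that the span supports; conceptually the test is that both the base address and the element count are multiples of the vector width. An illustrative standalone version of that test, a sketch of the idea rather than OpenCV's is_fully_aligned:

#include <cstddef>
#include <cstdint>

// True if the range [ptr, ptr + count) can be processed as vectors of N
// elements of T: the pointer is N*sizeof(T)-aligned and count divides evenly.
template <class T, std::size_t N>
bool can_vectorize(const T* ptr, std::size_t count) {
  const std::size_t vec_bytes = sizeof(T) * N;
  return reinterpret_cast<std::uintptr_t>(ptr) % vec_bytes == 0 &&
         count % N == 0;
}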
7ac1f1575e3ebd70b5fbe504b81a981e0988e504.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); cout << NX << " " << NY << " " << NZ << " " << T << endl; int num = 3; int NZ_ = NZ/num+2; int size = sizeof(REAL)*NX*NY*NZ; int partsize = sizeof(REAL)*NX*NY*NZ_; REAL **host_A = new REAL*[num]; REAL **host_B = new REAL*[num]; int size_ = NZ_*NY*NX; for (int i = 0; i < num; ++i) { host_A[i] = new REAL[size_]; host_B[i] = new REAL[size_]; //host_A[i] = (REAL*)malloc(partsize); //host_B[i] = (REAL*)malloc(partsize); } REAL* cpu_A = new REAL[NX*NY*NZ]; REAL* result_A = new REAL[NX*NY*NZ]; REAL* cpu_B = new REAL[NX*NY*NZ]; for (int part = 0; part < num; part++) for (int k = 0; k < NZ_; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { host_A[part][k*NY*NX+j*NX+i] = 1.0; host_B[part][k*NY*NX+j*NX+i] = 1.0; } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 1.0; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); float elapsed_time; double flops; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; hipMalloc(&dev_A, partsize); hipMalloc(&dev_B, partsize); hipMemcpy(dev_A, host_A[i], partsize, hipMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ hipLaunchKernelGGL(( baseline), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (hipGetLastError() != hipSuccess) printf("baseline: wrong!!!\n"); hipMemcpy(host_A[i], dev_A, partsize, hipMemcpyDeviceToHost); hipFree(dev_A); hipFree(dev_B); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, 
NULL); stencil(cpu_A, cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; int begin, end; int smallsize = NZ/num * NY * NX; int i=0, z=0, y=0, x=0; for (i = 0; i < num; ++i) { begin = 1; end = NZ_-1; if (i == 0) { begin=0; end=NZ_-2; } else if (i == num-1) { begin=2; end=NZ_; } int index = i*smallsize; for (z = begin; z < end; ++z) for (y = 0; y < NY; ++y) for (x = 0; x < NX; ++x) { result_A[index] = host_A[i][NY*NX*z + y*NX + x]; //assert(abs(host_A[i][NY*NX*z + y*NX + x] - 1.0) < 1e-5); //if (i == 2) // cout << host_A[i][NY*NX_*z + y*NX_ + x] << endl; index++; } } check(cpu_A, result_A, NX, NY, NZ); //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt hipEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("baseopt: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache hipEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("read-only data cache: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw hipEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } hipEventRecord(stop, 0); hipEventSynchronize(stop); if (hipGetLastError() != hipSuccess) printf("share memory raw: wrong!!!\n"); hipEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// hipEventDestroy(start); hipEventDestroy(stop); */ return 0; }
7ac1f1575e3ebd70b5fbe504b81a981e0988e504.cu
#include<cuda_runtime.h> #include<iostream> #include<stdio.h> #include<sys/time.h> #include<assert.h> using namespace std; #define REAL double #define BX 128 #define BY 2 #define BZ 1 #define GZ 1 const REAL cc = 0.4; const REAL ce = 0.1; const REAL cw = 0.1; const REAL cs = 0.1; const REAL cn = 0.1; const REAL ct = 0.1; const REAL cb = 0.1; //Must be re-written, including all the parameters int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps) { int i, j, k, s; #define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k)) for(s = 0; s < steps; s ++) { for(i = 0; i < nx; i ++) { for(j = 0; j < ny; j ++) { for(k = 0; k < nz; k ++) { REAL r = 0.4*A[IDX(i,j,k)]; if(k != 0) r += 0.1*A[IDX(i,j,k-1)]; else r += 0.1*A[IDX(i,j,k)]; if(k != nz-1) r += 0.1*A[IDX(i,j,k+1)]; else r += 0.1*A[IDX(i,j,k)]; if(j != 0) r += 0.1*A[IDX(i,j-1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(j != ny-1) r += 0.1*A[IDX(i,j+1,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != 0) r += 0.1*A[IDX(i-1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; if(i != nx-1) r += 0.1*A[IDX(i+1,j,k)]; else r += 0.1*A[IDX(i,j,k)]; B[IDX(i,j,k)] = r; } } } REAL *tmp = NULL; tmp = A, A = B, B = tmp; } return 0; } void check(REAL *a, REAL *b, int nx, int ny, int nz) { int slice = nx * ny; for (int z = 1; z < nz-1; ++z) { for (int y = 1; y < ny-1; ++y) { for (int x = 1; x < nz-1; ++x) { int idx = z * slice + y * nx + x; if (abs(a[idx]-b[idx]) > 1e-5) { cout << a[idx] << " " << b[idx] << endl; printf("%d\n", idx); printf("Wrong!!!!!!!!\n"); return; } } } } printf("Right!!!!!!\n"); return; } __global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; int k = kb; //int k = kb > 0? kb: 1; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; //#pragma unroll for (; k < ke; k++){ int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int b = (k==0)?c:c-slice; int t = (k==nz-1)?c:c+slice; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*A[t] + cb*A[b] + cc*A[c]; c += slice; //if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){ // B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx] // +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx]; // idx += slice; } } __global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? 
kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; //b_t = B[idx+slice]; ////A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] //// +ct*B[idx+slice] + cb*B[idx-slice] + cc*B[idx]; //A[idx] = ce*B[idx+1] + cw*B[idx-1] + cs*B[idx+nx] + cn*B[idx-nx] // +ct*b_t + cb*b_b + cc*b_c; //b_b = b_c; //b_c = b_t; //idx += slice; } return; } __global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz) { int i = threadIdx.x + blockDim.x*blockIdx.x; int j = threadIdx.y + blockDim.y*blockIdx.y; int kb = nz/gridDim.z*blockIdx.z; int slice = nx*ny; //int k = kb > 0? kb: 1; int k = kb; int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz; int c = i + j*nx + k*slice; int b = (k==0)?c:c-slice; int w = (i==0)?c:c-1; int e = (i==nx-1)?c:c+1; int n = (j==0)?c:c-nx; int s = (j==ny-1)?c:c+nx; int t; double b_b = A[b]; double b_c = A[c]; double b_t; #pragma unroll for (; k < ke; k++){ t = (k==nz-1)?c:c+slice; b_t = A[t]; B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n] +ct*b_t + cb*b_b + cc*b_c; b_b = b_c; b_c = b_t; c += slice; } return; } int main(int argc, char **argv){ int NX = atoi(argv[2]); int NY = atoi(argv[3]); int NZ = atoi(argv[4]); int T = atoi(argv[5]); cout << NX << " " << NY << " " << NZ << " " << T << endl; int num = 3; int NZ_ = NZ/num+2; int size = sizeof(REAL)*NX*NY*NZ; int partsize = sizeof(REAL)*NX*NY*NZ_; REAL **host_A = new REAL*[num]; REAL **host_B = new REAL*[num]; int size_ = NZ_*NY*NX; for (int i = 0; i < num; ++i) { host_A[i] = new REAL[size_]; host_B[i] = new REAL[size_]; //host_A[i] = (REAL*)malloc(partsize); //host_B[i] = (REAL*)malloc(partsize); } REAL* cpu_A = new REAL[NX*NY*NZ]; REAL* result_A = new REAL[NX*NY*NZ]; REAL* cpu_B = new REAL[NX*NY*NZ]; for (int part = 0; part < num; part++) for (int k = 0; k < NZ_; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { host_A[part][k*NY*NX+j*NX+i] = 1.0; host_B[part][k*NY*NX+j*NX+i] = 1.0; } for (int k = 0; k < NZ; k++) for (int j = 0; j < NY; j++) for (int i = 0; i < NX; i++) { //cout << k*NY*NX + j*NX + i << endl; cpu_A[k*NY*NX+j*NX+i] = 1.0; cpu_B[k*NY*NX+j*NX+i] = 1.0; result_A[k*NY*NX+j*NX+i] = 1.0; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); float elapsed_time; double flops; for (int i = 0; i < num; ++i) { REAL *dev_A, *dev_B; cudaMalloc(&dev_A, partsize); cudaMalloc(&dev_B, partsize); cudaMemcpy(dev_A, host_A[i], partsize, cudaMemcpyHostToDevice); dim3 threadPerBlock(BX, BY, BZ); //128,1,1 dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1 /////////////////////////////////////////////////////////////// //baseline for (int t = 0; t < T; t++){ baseline<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } /////////////////////////////////////////////////////////////// if (cudaGetLastError() != cudaSuccess) printf("baseline: wrong!!!\n"); cudaMemcpy(host_A[i], dev_A, partsize, cudaMemcpyDeviceToHost); cudaFree(dev_A); cudaFree(dev_B); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); struct timeval t1, t2; gettimeofday(&t1, NULL); stencil(cpu_A, 
cpu_B, NX, NY, NZ, T); gettimeofday(&t2, NULL); float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3; cout << "CPU time:" << cpubaseline_time/T << " ms" << endl; int begin, end; int smallsize = NZ/num * NY * NX; int i=0, z=0, y=0, x=0; for (i = 0; i < num; ++i) { begin = 1; end = NZ_-1; if (i == 0) { begin=0; end=NZ_-2; } else if (i == num-1) { begin=2; end=NZ_; } int index = i*smallsize; for (z = begin; z < end; ++z) for (y = 0; y < NY; ++y) for (x = 0; x < NX; ++x) { result_A[index] = host_A[i][NY*NX*z + y*NX + x]; //assert(abs(host_A[i][NY*NX*z + y*NX + x] - 1.0) < 1e-5); //if (i == 2) // cout << host_A[i][NY*NX_*z + y*NX_ + x] << endl; index++; } } check(cpu_A, result_A, NX, NY, NZ); //printf("baseline: Gflops = %lf\n", flops); printf("baseline: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; /* /////////////////////////////////////////////////////////////// //baseopt cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("baseopt: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("baseopt: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("baseopt: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //read-only data cache cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("read-only data cache: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("read-only data cache: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("read-only data cache: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////// //share memory raw cudaEventRecord(start, 0); for (int t = 0; t < T; t++){ shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ); REAL* tmp = dev_A; dev_A = dev_B; dev_B = tmp; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); if (cudaGetLastError() != cudaSuccess) printf("share memory raw: wrong!!!\n"); cudaEventElapsedTime(&elapsed_time, start, stop); printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T); flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6; flops /= elapsed_time; //printf("share memory raw: Gflops = %lf\n", flops); /////////////////////////////////////////////////////////////// cudaEventDestroy(start); cudaEventDestroy(stop); */ return 0; }
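// The baseopt and roc kernels above avoid reloading the vertical neighbours by keeping the
// bottom/centre/top planes of the current column in registers (b_b, b_c, b_t) and rotating
// them as k advances. Below, a stripped-down sketch of just that rotation for strictly
// interior columns (no boundary clamping, no domain decomposition), with the file's
// coefficient values folded in (0.4 centre, 0.1 neighbours). It is an illustration, not a
// drop-in replacement for the kernels above.
__global__ void stencil_column_sketch(const double* __restrict__ A, double* B,
                                      int nx, int ny, int nz)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x + 1;   // interior points only
    int j = blockIdx.y * blockDim.y + threadIdx.y + 1;
    if (i >= nx - 1 || j >= ny - 1) return;

    const int slice = nx * ny;
    int c = i + j * nx + 1 * slice;                      // start the column at k = 1
    double below  = A[c - slice];
    double centre = A[c];
    for (int k = 1; k < nz - 1; ++k) {
        double above = A[c + slice];                     // the only new vertical load per step
        B[c] = 0.4 * centre
             + 0.1 * (A[c - 1] + A[c + 1] + A[c - nx] + A[c + nx])
             + 0.1 * (below + above);
        below  = centre;                                 // rotate the register planes
        centre = above;
        c += slice;
    }
}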
31a008f55274b01bb840f24a13b15ebeb146b4b4.hip
// !!! This is a file automatically generated by hipify!!! #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include<vector> #include<iostream> #include <cassert> using std::vector; using namespace std; #define THREADS 16 // Forward declaration of the kernel __global__ void matrixMul(const int* a, const int* b, int* c, int N); // Compare the GPU results with CPU void check_result(const vector<int>& a, const vector<int>& b, const vector<int>& c, const int N) { // row for (int row = 0; row < N; ++row) { //column for (int col = 0; col < N; ++col) { //resultant element is computed int element = 0; for (int i = 0; i < N; i++) { element += a[row * N + i] * b[i * N + col]; } //Check CPU and GPU result assert(element == c[row * N + col]); } } } // Matrix multiplication - Host driver code void MatMul_driver(const vector<int> &h_a, const vector<int> &h_b, vector<int> &h_c, size_t bytes, int no_elements ) { // Allocating device memory int* d_a, * d_b, * d_c; hipMalloc(&d_a, bytes); hipMalloc(&d_b, bytes); hipMalloc(&d_c, bytes); // Copy data to the device hipMemcpy(d_a, h_a.data(), bytes, hipMemcpyHostToDevice); hipMemcpy(d_b, h_b.data(), bytes, hipMemcpyHostToDevice); //Threads per block const dim3 blockSize(THREADS, THREADS, 1); //Number of blocks const dim3 gridSize(ceil(no_elements / (float)THREADS), ceil(no_elements / (float)THREADS), 1); // Launch kernel hipLaunchKernelGGL(( matrixMul) , dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, no_elements); // Copy back to the host hipMemcpy(h_c.data(), d_c, bytes, hipMemcpyDeviceToHost); // Check result check_result(h_a, h_b, h_c, no_elements); cout<< "SUCCESSFULLY COMPLETED\n"; // Free memory on device hipFree(d_a); hipFree(d_b); hipFree(d_c); } // Matrix multiplication kernel called by MatMul_driver() // to be executed on the GPU. // Each thread fetches the data from the device memory. // Each thread reads one row of A and one column of B // and computes the corresponding element of C // does not take advantage of shared memory __global__ void matrixMul(const int* a, const int* b, int* c, int N) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Iterate over row, and down column if (row < N && col < N) { c[row * N + col] = 0; for (int k = 0; k < N; k++) { // Accumulate results for a single element c[row * N + col] += a[row * N + k] * b[k * N + col]; } } }
31a008f55274b01bb840f24a13b15ebeb146b4b4.cu
 #include "device_launch_parameters.h" #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include<vector> #include<iostream> #include <cassert> using std::vector; using namespace std; #define THREADS 16 // Forward declaration of the kernel __global__ void matrixMul(const int* a, const int* b, int* c, int N); // Compare the GPU results with CPU void check_result(const vector<int>& a, const vector<int>& b, const vector<int>& c, const int N) { // row for (int row = 0; row < N; ++row) { //column for (int col = 0; col < N; ++col) { //resultant element is computed int element = 0; for (int i = 0; i < N; i++) { element += a[row * N + i] * b[i * N + col]; } //Check CPU and GPU result assert(element == c[row * N + col]); } } } // Matrix multiplication - Host driver code void MatMul_driver(const vector<int> &h_a, const vector<int> &h_b, vector<int> &h_c, size_t bytes, int no_elements ) { // Allocating device memory int* d_a, * d_b, * d_c; cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes); // Copy data to the device cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice); //Threads per block const dim3 blockSize(THREADS, THREADS, 1); //Number of blocks const dim3 gridSize(ceil(no_elements / (float)THREADS), ceil(no_elements / (float)THREADS), 1); // Launch kernel matrixMul <<<gridSize, blockSize>>> (d_a, d_b, d_c, no_elements); // Copy back to the host cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost); // Check result check_result(h_a, h_b, h_c, no_elements); cout<< "SUCCESSFULLY COMPLETED\n"; // Free memory on device cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } // Matrix multiplication kernel called by MatMul_driver() // to be executed on the GPU. // Each thread fetches the data from the device memory. // Each thread reads one row of A and one column of B // and computes the corresponding element of C // does not take advantage of shared memory __global__ void matrixMul(const int* a, const int* b, int* c, int N) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Iterate over row, and down column if (row < N && col < N) { c[row * N + col] = 0; for (int k = 0; k < N; k++) { // Accumulate results for a single element c[row * N + col] += a[row * N + k] * b[k * N + col]; } } }
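// A hypothetical caller for MatMul_driver above, showing the expected argument shapes:
// square N x N row-major matrices in std::vector<int>, bytes = N * N * sizeof(int), and
// no_elements = N. It relies on the file above for MatMul_driver and <vector>; the value
// of N and the fill pattern are illustrative only.
int main() {
    const int N = 256;                               // matrix dimension (illustrative)
    const size_t bytes = size_t(N) * N * sizeof(int);
    std::vector<int> a(size_t(N) * N, 1);            // A = all ones
    std::vector<int> b(size_t(N) * N, 2);            // B = all twos
    std::vector<int> c(size_t(N) * N, 0);            // expected result: every element == 2 * N
    MatMul_driver(a, b, c, bytes, N);                // launches the kernel and checks against the CPU
    return 0;
}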
bc596331ef09210e4a48d782f00b391c4d156200.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zdiagcheck.cu, normal z -> d, Tue Aug 30 09:38:46 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // kernel __global__ void zdiagcheck_kernel( int num_rows, int num_cols, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magma_int_t * dinfo ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ int localinfo = 1; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; // check whether there exists a nonzero diagonal entry for( j=start; j<end; j++){ if( (dcolind[j] == row) && (dval[j] != MAGMA_D_ZERO) ){ localinfo = 0; } } // set flag to 1 if( localinfo == 1 ){ dinfo[0] = -3009; } } } /** Purpose ------- This routine checks for a CSR matrix whether there exists a zero on the diagonal. This can be the diagonal entry missing or an explicit zero. Arguments --------- @param[in] dA magma_d_matrix matrix in CSR format @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_ddiagcheck( magma_d_matrix dA, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t *hinfo = NULL; magma_int_t * dinfo = NULL; dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; CHECK( magma_imalloc( &dinfo, 1 ) ); CHECK( magma_imalloc_cpu( &hinfo, 1 ) ); hinfo[0] = 0; magma_isetvector( 1, hinfo, 1, dinfo, 1, queue ); hipLaunchKernelGGL(( zdiagcheck_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo ); info = hinfo[0]; magma_igetvector( 1, dinfo, 1, hinfo, 1, queue ); info = hinfo[0]; cleanup: magma_free( dinfo ); magma_free_cpu( hinfo ); return info; }
bc596331ef09210e4a48d782f00b391c4d156200.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/magma_zdiagcheck.cu, normal z -> d, Tue Aug 30 09:38:46 2016 */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 // kernel __global__ void zdiagcheck_kernel( int num_rows, int num_cols, magmaDouble_ptr dval, magmaIndex_ptr drowptr, magmaIndex_ptr dcolind, magma_int_t * dinfo ) { int row = blockIdx.x*blockDim.x+threadIdx.x; int j; if(row<num_rows){ int localinfo = 1; int start = drowptr[ row ]; int end = drowptr[ row+1 ]; // check whether there exists a nonzero diagonal entry for( j=start; j<end; j++){ if( (dcolind[j] == row) && (dval[j] != MAGMA_D_ZERO) ){ localinfo = 0; } } // set flag to 1 if( localinfo == 1 ){ dinfo[0] = -3009; } } } /** Purpose ------- This routine checks for a CSR matrix whether there exists a zero on the diagonal. This can be the diagonal entry missing or an explicit zero. Arguments --------- @param[in] dA magma_d_matrix matrix in CSR format @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_daux ********************************************************************/ extern "C" magma_int_t magma_ddiagcheck( magma_d_matrix dA, magma_queue_t queue ) { magma_int_t info = 0; magma_int_t *hinfo = NULL; magma_int_t * dinfo = NULL; dim3 grid( magma_ceildiv( dA.num_rows, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; CHECK( magma_imalloc( &dinfo, 1 ) ); CHECK( magma_imalloc_cpu( &hinfo, 1 ) ); hinfo[0] = 0; magma_isetvector( 1, hinfo, 1, dinfo, 1, queue ); zdiagcheck_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( dA.num_rows, dA.num_cols, dA.dval, dA.drow, dA.dcol, dinfo ); info = hinfo[0]; magma_igetvector( 1, dinfo, 1, hinfo, 1, queue ); info = hinfo[0]; cleanup: magma_free( dinfo ); magma_free_cpu( hinfo ); return info; }
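// The same row-per-thread diagonal check as zdiagcheck_kernel above, written against plain
// CSR arrays instead of the MAGMA pointer types, as a self-contained illustration; the MAGMA
// routine additionally shuttles the flag through magma_isetvector/magma_igetvector on the
// queue's stream. csr_diag_check_sketch and missing_flag are names local to this sketch.
__global__ void csr_diag_check_sketch(int num_rows,
                                      const double* __restrict__ val,
                                      const int* __restrict__ rowptr,
                                      const int* __restrict__ colind,
                                      int* missing_flag)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows) return;

    bool has_diag = false;
    for (int j = rowptr[row]; j < rowptr[row + 1]; ++j)
        if (colind[j] == row && val[j] != 0.0) { has_diag = true; break; }

    if (!has_diag)
        atomicOr(missing_flag, 1);   // any row without a usable diagonal raises the flag
}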
4c745f58e57253259e34715b8a0b381f70f311fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @File main.cu * * The main file of the project * * Parallel Computations on GPU (PCG 2020) * Assignment no. 1 (cuda) * Login: xstupi00 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include <vector> #include "nbody.h" #include "h5Helper.h" #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } /** * Main rotine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: " "nbody <N> <dt> <steps> <threads/block> <write intesity> " "<reduction threads> <reduction threads/block> <input> <output>\n" ); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA gird - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); // Number of records to continuous writing of partial results const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU particles structures t_particles particles_cpu; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: CPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The overall memory size of input particles size_t size = N * sizeof(float); // Allocates page-locked memory on the host. Maps the allocation into the CUDA address space checkCudaErrors(hipHostMalloc(&particles_cpu.pos_x, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.pos_y, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.pos_z, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_x, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_y, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.vel_z, size, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc(&particles_cpu.weight, size, hipHostMallocMapped)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory layout descriptor (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* * Caution! 
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Position in X particles_cpu.pos_y, 1, 0, // Position in Y particles_cpu.pos_z, 1, 0, // Position in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum // Number of records in output file ); // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: GPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // GPU particles structure std::vector<t_particles> particles_gpu(2); // Allocate memory on the device for (auto &p_gpu : particles_gpu) { checkCudaErrors(hipMalloc(&p_gpu.pos_x, size)); checkCudaErrors(hipMalloc(&p_gpu.pos_y, size)); checkCudaErrors(hipMalloc(&p_gpu.pos_z, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_x, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_y, size)); checkCudaErrors(hipMalloc(&p_gpu.vel_z, size)); checkCudaErrors(hipMalloc(&p_gpu.weight, size)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from host to device. 
checkCudaErrors(hipMemcpy(particles_gpu[0].pos_x, particles_cpu.pos_x, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].pos_y, particles_cpu.pos_y, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].pos_z, particles_cpu.pos_z, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_x, particles_cpu.vel_x, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_y, particles_cpu.vel_y, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].vel_z, particles_cpu.vel_z, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[0].weight, particles_cpu.weight, size, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(particles_gpu[1].weight, particles_gpu[0].weight, size, hipMemcpyDeviceToDevice)); gettimeofday(&t1, 0); for (int s = 0; s < steps; s++) { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: kernels invocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////// hipLaunchKernelGGL(( calculate_velocity), dim3(simulationGrid), dim3(thr_blc), 0, 0, particles_gpu[s & 1ul], particles_gpu[(s+1) & 1ul], N, dt); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// if (writeFreq > 0 && (s % writeFreq == 0)) { ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization and file access logic (step 4) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: invocation of center-of-mass kernel (step 3.1, step 3.2, step 4) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for particle data (step 0) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnGPU; // Copies particles data from device to host. 
checkCudaErrors(hipMemcpy(particles_cpu.pos_x, particles_gpu[steps & 1].pos_x, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.pos_y, particles_gpu[steps & 1].pos_y, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.pos_z, particles_gpu[steps & 1].pos_z, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_x, particles_gpu[steps & 1].vel_x, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_y, particles_gpu[steps & 1].vel_y, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.vel_z, particles_gpu[steps & 1].vel_z, size, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(particles_cpu.weight, particles_gpu[steps & 1].weight, size, hipMemcpyDeviceToHost)); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x << ", " << comOnCPU.y << ", " << comOnCPU.z << ", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comOnGPU.x << ", " << comOnGPU.y << ", " << comOnGPU.z << ", " << comOnGPU.w << std::endl; // Writing final values to the file h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w); h5Helper.writeParticleDataFinal(); // Free page-locked memory. checkCudaErrors(hipHostFree(particles_cpu.pos_x)); checkCudaErrors(hipHostFree(particles_cpu.pos_y)); checkCudaErrors(hipHostFree(particles_cpu.pos_z)); checkCudaErrors(hipHostFree(particles_cpu.vel_x)); checkCudaErrors(hipHostFree(particles_cpu.vel_y)); checkCudaErrors(hipHostFree(particles_cpu.vel_z)); checkCudaErrors(hipHostFree(particles_cpu.weight)); // Free memory on the device. for (auto p_gpu : particles_gpu) { checkCudaErrors(hipFree(p_gpu.pos_x)); checkCudaErrors(hipFree(p_gpu.pos_y)); checkCudaErrors(hipFree(p_gpu.pos_z)); checkCudaErrors(hipFree(p_gpu.vel_x)); checkCudaErrors(hipFree(p_gpu.vel_y)); checkCudaErrors(hipFree(p_gpu.vel_z)); checkCudaErrors(hipFree(p_gpu.weight)); } return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
4c745f58e57253259e34715b8a0b381f70f311fe.cu
/** * @File main.cu * * The main file of the project * * Parallel Computations on GPU (PCG 2020) * Assignment no. 1 (cuda) * Login: xstupi00 */ #include <sys/time.h> #include <cstdio> #include <cmath> #include <vector> #include "nbody.h" #include "h5Helper.h" #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } /** * Main rotine * @param argc * @param argv * @return */ int main(int argc, char **argv) { // Time measurement struct timeval t1, t2; if (argc != 10) { printf("Usage: " "nbody <N> <dt> <steps> <threads/block> <write intesity> " "<reduction threads> <reduction threads/block> <input> <output>\n" ); exit(1); } // Number of particles const int N = std::stoi(argv[1]); // Length of time step const float dt = std::stof(argv[2]); // Number of steps const int steps = std::stoi(argv[3]); // Number of thread blocks const int thr_blc = std::stoi(argv[4]); // Write frequency int writeFreq = std::stoi(argv[5]); // number of reduction threads const int red_thr = std::stoi(argv[6]); // Number of reduction threads/blocks const int red_thr_blc = std::stoi(argv[7]); // Size of the simulation CUDA gird - number of blocks const size_t simulationGrid = (N + thr_blc - 1) / thr_blc; // Size of the reduction CUDA grid - number of blocks const size_t reductionGrid = (red_thr + red_thr_blc - 1) / red_thr_blc; // Log benchmark setup printf("N: %d\n", N); printf("dt: %f\n", dt); printf("steps: %d\n", steps); printf("threads/block: %d\n", thr_blc); printf("blocks/grid: %lu\n", simulationGrid); printf("reduction threads/block: %d\n", red_thr_blc); printf("reduction blocks/grid: %lu\n", reductionGrid); // Number of records to continuous writing of partial results const size_t recordsNum = (writeFreq > 0) ? (steps + writeFreq - 1) / writeFreq : 0; writeFreq = (writeFreq > 0) ? writeFreq : 0; // CPU particles structures t_particles particles_cpu; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: CPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The overall memory size of input particles size_t size = N * sizeof(float); // Allocates page-locked memory on the host. Maps the allocation into the CUDA address space checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_x, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_y, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.pos_z, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_x, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_y, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.vel_z, size, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc(&particles_cpu.weight, size, cudaHostAllocMapped)); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory layout descriptor (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /* * Caution! 
Create only after CPU side allocation * parameters: * Stride of two Offset of the first * Data pointer consecutive elements element in floats, * in floats, not bytes not bytes */ MemDesc md( particles_cpu.pos_x, 1, 0, // Position in X particles_cpu.pos_y, 1, 0, // Position in Y particles_cpu.pos_z, 1, 0, // Position in Z particles_cpu.vel_x, 1, 0, // Velocity in X particles_cpu.vel_y, 1, 0, // Velocity in Y particles_cpu.vel_z, 1, 0, // Velocity in Z particles_cpu.weight, 1, 0, // Weight N, // Number of particles recordsNum // Number of records in output file ); // Initialisation of helper class and loading of input data H5Helper h5Helper(argv[8], argv[9], md); try { h5Helper.init(); h5Helper.readParticleData(); } catch (const std::exception &e) { std::cerr << e.what() << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: GPU side memory allocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // GPU particles structure std::vector<t_particles> particles_gpu(2); // Allocate memory on the device for (auto &p_gpu : particles_gpu) { checkCudaErrors(cudaMalloc(&p_gpu.pos_x, size)); checkCudaErrors(cudaMalloc(&p_gpu.pos_y, size)); checkCudaErrors(cudaMalloc(&p_gpu.pos_z, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_x, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_y, size)); checkCudaErrors(cudaMalloc(&p_gpu.vel_z, size)); checkCudaErrors(cudaMalloc(&p_gpu.weight, size)); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Copies particles data from host to device. 
checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_x, particles_cpu.pos_x, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_y, particles_cpu.pos_y, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].pos_z, particles_cpu.pos_z, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_x, particles_cpu.vel_x, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_y, particles_cpu.vel_y, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].vel_z, particles_cpu.vel_z, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[0].weight, particles_cpu.weight, size, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(particles_gpu[1].weight, particles_gpu[0].weight, size, cudaMemcpyDeviceToDevice)); gettimeofday(&t1, 0); for (int s = 0; s < steps; s++) { //////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: kernels invocation (step 0) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////// calculate_velocity<<<simulationGrid, thr_blc>>>(particles_gpu[s & 1ul], particles_gpu[(s+1) & 1ul], N, dt); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization (step 4) // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// if (writeFreq > 0 && (s % writeFreq == 0)) { ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: synchronization and file access logic (step 4) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: invocation of center-of-mass kernel (step 3.1, step 3.2, step 4) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); gettimeofday(&t2, 0); // Approximate simulation wall time double t = (1000000.0 * (t2.tv_sec - t1.tv_sec) + t2.tv_usec - t1.tv_usec) / 1000000.0; printf("Time: %f s\n", t); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for particle data (step 0) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnGPU; // Copies particles data from device to host. 
checkCudaErrors(cudaMemcpy(particles_cpu.pos_x, particles_gpu[steps & 1].pos_x, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.pos_y, particles_gpu[steps & 1].pos_y, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.pos_z, particles_gpu[steps & 1].pos_z, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_x, particles_gpu[steps & 1].vel_x, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_y, particles_gpu[steps & 1].vel_y, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.vel_z, particles_gpu[steps & 1].vel_z, size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(particles_cpu.weight, particles_gpu[steps & 1].weight, size, cudaMemcpyDeviceToHost)); ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // FILL IN: memory transfers for center-of-mass (step 3.1, step 3.2) // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// float4 comOnCPU = centerOfMassCPU(md); std::cout << "Center of mass on CPU:" << std::endl << comOnCPU.x << ", " << comOnCPU.y << ", " << comOnCPU.z << ", " << comOnCPU.w << std::endl; std::cout << "Center of mass on GPU:" << std::endl << comOnGPU.x << ", " << comOnGPU.y << ", " << comOnGPU.z << ", " << comOnGPU.w << std::endl; // Writing final values to the file h5Helper.writeComFinal(comOnGPU.x, comOnGPU.y, comOnGPU.z, comOnGPU.w); h5Helper.writeParticleDataFinal(); // Free page-locked memory. checkCudaErrors(cudaFreeHost(particles_cpu.pos_x)); checkCudaErrors(cudaFreeHost(particles_cpu.pos_y)); checkCudaErrors(cudaFreeHost(particles_cpu.pos_z)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_x)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_y)); checkCudaErrors(cudaFreeHost(particles_cpu.vel_z)); checkCudaErrors(cudaFreeHost(particles_cpu.weight)); // Free memory on the device. for (auto p_gpu : particles_gpu) { checkCudaErrors(cudaFree(p_gpu.pos_x)); checkCudaErrors(cudaFree(p_gpu.pos_y)); checkCudaErrors(cudaFree(p_gpu.pos_z)); checkCudaErrors(cudaFree(p_gpu.vel_x)); checkCudaErrors(cudaFree(p_gpu.vel_y)); checkCudaErrors(cudaFree(p_gpu.vel_z)); checkCudaErrors(cudaFree(p_gpu.weight)); } return 0; }// end of main //----------------------------------------------------------------------------------------------------------------------
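// The step-3 FILL IN blocks above are left open and comOnGPU is never assigned. One possible
// shape for that reduction -- a sketch only, not the assignment's reference solution -- is a
// grid-stride accumulation of weighted positions, a shared-memory block reduction, and atomic
// adds of the per-block partial sums. It assumes a power-of-two block size, blockDim.x *
// sizeof(float4) bytes of dynamic shared memory, and a zero-initialized com before launch.
__global__ void center_of_mass_sketch(const float* pos_x, const float* pos_y,
                                      const float* pos_z, const float* weight,
                                      float4* com, int n)
{
    extern __shared__ float4 sdata[];                // blockDim.x entries
    float4 acc = make_float4(0.f, 0.f, 0.f, 0.f);
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        float w = weight[i];
        acc.x += pos_x[i] * w;  acc.y += pos_y[i] * w;
        acc.z += pos_z[i] * w;  acc.w += w;          // w accumulates the total mass
    }
    sdata[threadIdx.x] = acc;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction (power-of-two blockDim)
        if (threadIdx.x < s) {
            sdata[threadIdx.x].x += sdata[threadIdx.x + s].x;
            sdata[threadIdx.x].y += sdata[threadIdx.x + s].y;
            sdata[threadIdx.x].z += sdata[threadIdx.x + s].z;
            sdata[threadIdx.x].w += sdata[threadIdx.x + s].w;
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {                          // accumulate block partials globally
        atomicAdd(&com->x, sdata[0].x);
        atomicAdd(&com->y, sdata[0].y);
        atomicAdd(&com->z, sdata[0].z);
        atomicAdd(&com->w, sdata[0].w);
    }
}
// e.g. center_of_mass_sketch<<<reductionGrid, red_thr_blc, red_thr_blc * sizeof(float4)>>>(...),
// assuming red_thr_blc is a power of two; the host then divides x, y, z of *com by its w.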
764d1160fa59ecceb684896ad265bd2d0145471c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) __host__ __device__ int outInvariant(int inValue) { return inValue * inValue; } __host__ __device__ int outDependent(int value, int inIdx, int outIdx) { if (inIdx == outIdx) { return 2 * value; } else if (inIdx > outIdx) { return value / (inIdx - outIdx); } else { return value / (outIdx - inIdx); } } __global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) { //@@ INSERT CODE HERE int inIdx = blockIdx.x*blockDim.x + threadIdx.x; int intermediate = outInvariant(in[inIdx]); for (int outIdx = 0; outIdx < len; ++outIdx) { out[outIdx] += outDependent(intermediate, inIdx, outIdx); } } __global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) { //@@ INSERT CODE HERE int outIdx = blockIdx.x*blockDim.x + threadIdx.x; int temp = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { temp += outDependent(outInvariant(in[inIdx]),inIdx,outIdx); } out[outIdx] = temp; } static void s2g_cpu_scatter(int *in, int *out, int len) { for (int inIdx = 0; inIdx < len; ++inIdx) { int intermediate = outInvariant(in[inIdx]); for (int outIdx = 0; outIdx < len; ++outIdx) { out[outIdx] += outDependent(intermediate, inIdx, outIdx); } } } static void s2g_cpu_gather(int *in, int *out, int len) { //@@ INSERT CODE HERE for (int outIdx = 0; outIdx < len; ++outIdx) { int temp = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { temp += outDependent(outInvariant(in[inIdx]),inIdx,outIdx); } out[outIdx] = temp; } } static void s2g_gpu_scatter(int *in, int *out, int len, int nThreads) { //@@ INSERT CODE HERE //int nThreads = 1024; int nBlocks = (len+nThreads-1)/nThreads; hipLaunchKernelGGL(( s2g_gpu_scatter_kernel), dim3(nBlocks),dim3(nThreads), 0, 0, in,out,len); } static void s2g_gpu_gather(int *in, int *out, int len, int nThreads) { //@@ INSERT CODE HERE int nBlocks = (len+nThreads-1)/nThreads; hipLaunchKernelGGL(( s2g_gpu_gather_kernel), dim3(nBlocks),dim3(nThreads), 0, 0, in,out,len); } int main(int argc, char **argv) { wbArg_t args; int inputLength; int *hostInput; int *hostOutput; int *deviceInput; int *deviceOutput; size_t byteCount; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer"); hostOutput = (int *)malloc(inputLength * sizeof(int)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); byteCount = inputLength * sizeof(int); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(hipMalloc((void **)&deviceInput, byteCount)); wbCheck(hipMalloc((void **)&deviceOutput, byteCount)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(hipMemcpy(deviceInput, hostInput, byteCount, hipMemcpyHostToDevice)); wbCheck(hipMemset(deviceOutput, 0, byteCount)); wbTime_stop(GPU, "Copying input memory to the GPU."); // Utilizing the max possible threads per block and determining // block partition size from the problem dimension (see device code) hipDeviceProp_t deviceProp; wbCheck(hipGetDeviceProperties(&deviceProp, 0)); int nThreadsPerBlock = deviceProp.maxThreadsPerBlock; ////////////////////////////////////////// // CPU Scatter Computation 
////////////////////////////////////////// wbTime_start(Compute, "Performing CPU Scatter computation"); s2g_cpu_scatter(hostInput, hostOutput, inputLength); wbTime_stop(Compute, "Performing CPU Scatter computation"); wbSolution(args, hostOutput, inputLength); memset(hostOutput, 0, byteCount); ////////////////////////////////////////// // GPU Scatter Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing GPU Scatter computation"); s2g_gpu_scatter(deviceInput, deviceOutput, inputLength, nThreadsPerBlock); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing GPU Scatter computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount, hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbSolution(args, hostOutput, inputLength); wbCheck(hipMemset(deviceOutput, 0, byteCount)); ////////////////////////////////////////// // CPU Gather Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing CPU Gather computation"); s2g_cpu_gather(hostInput, hostOutput, inputLength); wbTime_stop(Compute, "Performing CPU Gather computation"); wbSolution(args, hostOutput, inputLength); memset(hostOutput, 0, byteCount); ////////////////////////////////////////// // GPU Gather Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing GPU Gather computation"); s2g_gpu_gather(deviceInput, deviceOutput, inputLength, nThreadsPerBlock); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing GPU Gather computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount, hipMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbSolution(args, hostOutput, inputLength); wbCheck(hipMemset(deviceOutput, 0, byteCount)); wbTime_start(GPU, "Freeing GPU Memory"); hipFree(deviceInput); hipFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); free(hostInput); free(hostOutput); return 0; }
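// The launches in s2g_gpu_scatter / s2g_gpu_gather round len up to whole blocks of
// maxThreadsPerBlock, so surplus threads index past the arrays; in addition, the scatter
// kernel has every thread do a non-atomic read-modify-write on the shared out[] entries.
// Guarded variants are sketched below, reusing the file's outInvariant/outDependent helpers;
// the _guarded kernel names are local to this sketch.
__global__ void s2g_gpu_scatter_kernel_guarded(int *in, int *out, int len) {
  int inIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (inIdx >= len) return;                              // drop the padding threads
  int intermediate = outInvariant(in[inIdx]);
  for (int outIdx = 0; outIdx < len; ++outIdx)
    atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));  // no lost updates
}

__global__ void s2g_gpu_gather_kernel_guarded(int *in, int *out, int len) {
  int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (outIdx >= len) return;                             // drop the padding threads
  int temp = 0;
  for (int inIdx = 0; inIdx < len; ++inIdx)
    temp += outDependent(outInvariant(in[inIdx]), inIdx, outIdx);
  out[outIdx] = temp;                                    // each output owned by one thread
}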
764d1160fa59ecceb684896ad265bd2d0145471c.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) __host__ __device__ int outInvariant(int inValue) { return inValue * inValue; } __host__ __device__ int outDependent(int value, int inIdx, int outIdx) { if (inIdx == outIdx) { return 2 * value; } else if (inIdx > outIdx) { return value / (inIdx - outIdx); } else { return value / (outIdx - inIdx); } } __global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) { //@@ INSERT CODE HERE int inIdx = blockIdx.x*blockDim.x + threadIdx.x; int intermediate = outInvariant(in[inIdx]); for (int outIdx = 0; outIdx < len; ++outIdx) { out[outIdx] += outDependent(intermediate, inIdx, outIdx); } } __global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) { //@@ INSERT CODE HERE int outIdx = blockIdx.x*blockDim.x + threadIdx.x; int temp = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { temp += outDependent(outInvariant(in[inIdx]),inIdx,outIdx); } out[outIdx] = temp; } static void s2g_cpu_scatter(int *in, int *out, int len) { for (int inIdx = 0; inIdx < len; ++inIdx) { int intermediate = outInvariant(in[inIdx]); for (int outIdx = 0; outIdx < len; ++outIdx) { out[outIdx] += outDependent(intermediate, inIdx, outIdx); } } } static void s2g_cpu_gather(int *in, int *out, int len) { //@@ INSERT CODE HERE for (int outIdx = 0; outIdx < len; ++outIdx) { int temp = 0; for (int inIdx = 0; inIdx < len; ++inIdx) { temp += outDependent(outInvariant(in[inIdx]),inIdx,outIdx); } out[outIdx] = temp; } } static void s2g_gpu_scatter(int *in, int *out, int len, int nThreads) { //@@ INSERT CODE HERE //int nThreads = 1024; int nBlocks = (len+nThreads-1)/nThreads; s2g_gpu_scatter_kernel<<<nBlocks,nThreads>>>(in,out,len); } static void s2g_gpu_gather(int *in, int *out, int len, int nThreads) { //@@ INSERT CODE HERE int nBlocks = (len+nThreads-1)/nThreads; s2g_gpu_gather_kernel<<<nBlocks,nThreads>>>(in,out,len); } int main(int argc, char **argv) { wbArg_t args; int inputLength; int *hostInput; int *hostOutput; int *deviceInput; int *deviceOutput; size_t byteCount; args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer"); hostOutput = (int *)malloc(inputLength * sizeof(int)); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The input length is ", inputLength); byteCount = inputLength * sizeof(int); wbTime_start(GPU, "Allocating GPU memory."); wbCheck(cudaMalloc((void **)&deviceInput, byteCount)); wbCheck(cudaMalloc((void **)&deviceOutput, byteCount)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); wbCheck(cudaMemcpy(deviceInput, hostInput, byteCount, cudaMemcpyHostToDevice)); wbCheck(cudaMemset(deviceOutput, 0, byteCount)); wbTime_stop(GPU, "Copying input memory to the GPU."); // Utilizing the max possible threads per block and determining // block partition size from the problem dimension (see device code) cudaDeviceProp deviceProp; wbCheck(cudaGetDeviceProperties(&deviceProp, 0)); int nThreadsPerBlock = deviceProp.maxThreadsPerBlock; ////////////////////////////////////////// // CPU Scatter Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing CPU Scatter computation"); s2g_cpu_scatter(hostInput, hostOutput, inputLength); wbTime_stop(Compute, 
"Performing CPU Scatter computation"); wbSolution(args, hostOutput, inputLength); memset(hostOutput, 0, byteCount); ////////////////////////////////////////// // GPU Scatter Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing GPU Scatter computation"); s2g_gpu_scatter(deviceInput, deviceOutput, inputLength, nThreadsPerBlock); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing GPU Scatter computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount, cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbSolution(args, hostOutput, inputLength); wbCheck(cudaMemset(deviceOutput, 0, byteCount)); ////////////////////////////////////////// // CPU Gather Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing CPU Gather computation"); s2g_cpu_gather(hostInput, hostOutput, inputLength); wbTime_stop(Compute, "Performing CPU Gather computation"); wbSolution(args, hostOutput, inputLength); memset(hostOutput, 0, byteCount); ////////////////////////////////////////// // GPU Gather Computation ////////////////////////////////////////// wbTime_start(Compute, "Performing GPU Gather computation"); s2g_gpu_gather(deviceInput, deviceOutput, inputLength, nThreadsPerBlock); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing GPU Gather computation"); wbTime_start(Copy, "Copying output memory to the CPU"); wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount, cudaMemcpyDeviceToHost)); wbTime_stop(Copy, "Copying output memory to the CPU"); wbSolution(args, hostOutput, inputLength); wbCheck(cudaMemset(deviceOutput, 0, byteCount)); wbTime_start(GPU, "Freeing GPU Memory"); cudaFree(deviceInput); cudaFree(deviceOutput); wbTime_stop(GPU, "Freeing GPU Memory"); free(hostInput); free(hostOutput); return 0; }
df1f31301d7fca39a000315500e43423e3eed6d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

#define TILE_WIDTH 40

//-----------------------------------------------
//--------------------------------------------------
// Compute C = A * B
//-------------------------------------------------
__global__ void matrixMultiplySharedMem(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) {
    //@@ Insert code to implement matrix multiplication here
    __shared__ double ds_M[TILE_WIDTH][TILE_WIDTH];
    __shared__ double ds_N[TILE_WIDTH][TILE_WIDTH];
    int bx = blockIdx.x, by = blockIdx.y,
        tx = threadIdx.x, ty = threadIdx.y,
        Row = by * TILE_WIDTH + ty,
        Col = bx * TILE_WIDTH + tx;
    double Pvalue = 0;
    for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) {
        if (Row < numARows && m*TILE_WIDTH+tx < numAColumns)
            ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx];
        else
            ds_M[ty][tx] = 0;
        if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows)
            ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col];
        else
            ds_N[ty][tx] = 0;
        __syncthreads();
        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += ds_M[ty][k] * ds_N[k][tx];
        __syncthreads();
    }
    if (Row < numCRows && Col < numCColumns)
        C[Row*numCColumns+Col] = Pvalue;
}
df1f31301d7fca39a000315500e43423e3eed6d9.cu
#include "includes.h" #define TILE_WIDTH 40 //----------------------------------------------- //-------------------------------------------------- // Compute C = A * B //------------------------------------------------- __global__ void matrixMultiplySharedMem(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here __shared__ double ds_M[TILE_WIDTH][TILE_WIDTH]; __shared__ double ds_N[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x, ty = threadIdx.y, Row = by * TILE_WIDTH + ty, Col = bx * TILE_WIDTH + tx; double Pvalue = 0; for (int m = 0; m < (numAColumns-1)/TILE_WIDTH+1; ++m) { if (Row < numARows && m*TILE_WIDTH+tx < numAColumns) ds_M[ty][tx] = A[Row*numAColumns + m*TILE_WIDTH+tx]; else ds_M[ty][tx] = 0; if (Col < numBColumns && m*TILE_WIDTH+ty < numBRows) ds_N[ty][tx] = B[(m*TILE_WIDTH+ty)*numBColumns+Col]; else ds_N[ty][tx] = 0; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) Pvalue += ds_M[ty][k] * ds_N[k][tx]; __syncthreads(); } if (Row < numCRows && Col < numCColumns) C[Row*numCColumns+Col] = Pvalue; }
95de7ca52ee7e22f26a4117c0491ce4ef6be2878.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void process_kernel1(const float* input1, const float* input2, float* output, int numElements){
    int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
    int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
    if(globalThreadId < numElements)
        output[globalThreadId] = (float)sin(input1[globalThreadId]) + (float)cos(input2[globalThreadId]);
}
95de7ca52ee7e22f26a4117c0491ce4ef6be2878.cu
#include "includes.h" __global__ void process_kernel1(const float* input1, const float* input2, float* output, int numElements){ int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x; int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x; int globalThreadId = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum; if(globalThreadId < numElements) output[globalThreadId] = (float)sin(input1[globalThreadId]) + (float)cos(input2[globalThreadId]); }
6e21c84f18b87c0532426c6bdc437bb0e2341e31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <helper_cuda.h> #include "convolutionFFT2D_common.h" #include "convolutionFFT2D.cuh" //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// extern "C" void padKernel( float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX ) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y)); SET_FLOAT_BASE; hipLaunchKernelGGL(( padKernel_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX ); getLastCudaError("padKernel_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// extern "C" void padDataClampToBorder( float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX ) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y)); SET_FLOAT_BASE; hipLaunchKernelGGL(( padDataClampToBorder_kernel), dim3(grid), dim3(threads), 0, 0, d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX ); getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Modulate Fourier image of padded data by Fourier image of padded kernel // and normalize by FFT size //////////////////////////////////////////////////////////////////////////////// extern "C" void modulateAndNormalize( fComplex *d_Dst, fComplex *d_Src, int fftH, int fftW, int padding ) { assert(fftW % 2 == 0); const int dataSize = fftH * (fftW / 2 + padding); hipLaunchKernelGGL(( modulateAndNormalize_kernel), dim3(iDivUp(dataSize, 256)), dim3(256), 0, 0, d_Dst, d_Src, dataSize, 1.0f / (float)(fftW *fftH) ); getLastCudaError("modulateAndNormalize() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // 2D R2C / C2R post/preprocessing kernels //////////////////////////////////////////////////////////////////////////////// static const double PI = 3.1415926535897932384626433832795; static const uint BLOCKDIM = 256; extern "C" void spPostprocess2D( void *d_Dst, void *d_Src, uint DY, uint DX, uint padding, int dir ) { assert(d_Src != d_Dst); assert(DX % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = DY * (DX / 2); const double phaseBase = dir * PI / (double)DX; SET_FCOMPLEX_BASE; hipLaunchKernelGGL(( spPostprocess2D_kernel), 
dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0, (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase ); getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n"); } extern "C" void spPreprocess2D( void *d_Dst, void *d_Src, uint DY, uint DX, uint padding, int dir ) { assert(d_Src != d_Dst); assert(DX % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = DY * (DX / 2); const double phaseBase = -dir * PI / (double)DX; SET_FCOMPLEX_BASE; hipLaunchKernelGGL(( spPreprocess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0, (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase ); getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D //////////////////////////////////////////////////////////////////////////////// extern "C" void spProcess2D( void *d_Dst, void *d_SrcA, void *d_SrcB, uint DY, uint DX, int dir ) { assert(DY % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = (DY / 2) * DX; const double phaseBase = dir * PI / (double)DX; SET_FCOMPLEX_BASE_A; SET_FCOMPLEX_BASE_B; hipLaunchKernelGGL(( spProcess2D_kernel), dim3(iDivUp(threadCount, BLOCKDIM)), dim3(BLOCKDIM), 0, 0, (fComplex *)d_Dst, (fComplex *)d_SrcA, (fComplex *)d_SrcB, DY, DX, threadCount, (float)phaseBase, 0.5f / (float)(DY *DX) ); getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n"); }
6e21c84f18b87c0532426c6bdc437bb0e2341e31.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <helper_cuda.h> #include "convolutionFFT2D_common.h" #include "convolutionFFT2D.cuh" //////////////////////////////////////////////////////////////////////////////// /// Position convolution kernel center at (0, 0) in the image //////////////////////////////////////////////////////////////////////////////// extern "C" void padKernel( float *d_Dst, float *d_Src, int fftH, int fftW, int kernelH, int kernelW, int kernelY, int kernelX ) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(kernelW, threads.x), iDivUp(kernelH, threads.y)); SET_FLOAT_BASE; padKernel_kernel<<<grid, threads>>>( d_Dst, d_Src, fftH, fftW, kernelH, kernelW, kernelY, kernelX ); getLastCudaError("padKernel_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Prepare data for "pad to border" addressing mode //////////////////////////////////////////////////////////////////////////////// extern "C" void padDataClampToBorder( float *d_Dst, float *d_Src, int fftH, int fftW, int dataH, int dataW, int kernelW, int kernelH, int kernelY, int kernelX ) { assert(d_Src != d_Dst); dim3 threads(32, 8); dim3 grid(iDivUp(fftW, threads.x), iDivUp(fftH, threads.y)); SET_FLOAT_BASE; padDataClampToBorder_kernel<<<grid, threads>>>( d_Dst, d_Src, fftH, fftW, dataH, dataW, kernelH, kernelW, kernelY, kernelX ); getLastCudaError("padDataClampToBorder_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Modulate Fourier image of padded data by Fourier image of padded kernel // and normalize by FFT size //////////////////////////////////////////////////////////////////////////////// extern "C" void modulateAndNormalize( fComplex *d_Dst, fComplex *d_Src, int fftH, int fftW, int padding ) { assert(fftW % 2 == 0); const int dataSize = fftH * (fftW / 2 + padding); modulateAndNormalize_kernel<<<iDivUp(dataSize, 256), 256>>>( d_Dst, d_Src, dataSize, 1.0f / (float)(fftW *fftH) ); getLastCudaError("modulateAndNormalize() execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // 2D R2C / C2R post/preprocessing kernels //////////////////////////////////////////////////////////////////////////////// static const double PI = 3.1415926535897932384626433832795; static const uint BLOCKDIM = 256; extern "C" void spPostprocess2D( void *d_Dst, void *d_Src, uint DY, uint DX, uint padding, int dir ) { assert(d_Src != d_Dst); assert(DX % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = DY * (DX / 2); const double phaseBase = dir * PI / (double)DX; SET_FCOMPLEX_BASE; spPostprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>( (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase ); getLastCudaError("spPostprocess2D_kernel<<<>>> execution failed\n"); } extern "C" void 
spPreprocess2D( void *d_Dst, void *d_Src, uint DY, uint DX, uint padding, int dir ) { assert(d_Src != d_Dst); assert(DX % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = DY * (DX / 2); const double phaseBase = -dir * PI / (double)DX; SET_FCOMPLEX_BASE; spPreprocess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>( (fComplex *)d_Dst, (fComplex *)d_Src, DY, DX, threadCount, padding, (float)phaseBase ); getLastCudaError("spPreprocess2D_kernel<<<>>> execution failed\n"); } //////////////////////////////////////////////////////////////////////////////// // Combined spPostprocess2D + modulateAndNormalize + spPreprocess2D //////////////////////////////////////////////////////////////////////////////// extern "C" void spProcess2D( void *d_Dst, void *d_SrcA, void *d_SrcB, uint DY, uint DX, int dir ) { assert(DY % 2 == 0); #if(POWER_OF_TWO) uint log2DX, log2DY; uint factorizationRemX = factorRadix2(log2DX, DX); uint factorizationRemY = factorRadix2(log2DY, DY); assert(factorizationRemX == 1 && factorizationRemY == 1); #endif const uint threadCount = (DY / 2) * DX; const double phaseBase = dir * PI / (double)DX; SET_FCOMPLEX_BASE_A; SET_FCOMPLEX_BASE_B; spProcess2D_kernel<<<iDivUp(threadCount, BLOCKDIM), BLOCKDIM>>>( (fComplex *)d_Dst, (fComplex *)d_SrcA, (fComplex *)d_SrcB, DY, DX, threadCount, (float)phaseBase, 0.5f / (float)(DY *DX) ); getLastCudaError("spProcess2D_kernel<<<>>> execution failed\n"); }
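modulateAndNormalize() in the pair above performs the frequency-domain step of FFT convolution: a point-wise complex multiply of the data spectrum by the kernel spectrum plus division by the FFT size. The kernel itself lives in convolutionFFT2D.cuh and is not shown here, so the sketch below only illustrates that operation with a stand-in fComplexSketch type; it is not the sample's actual modulateAndNormalize_kernel.

struct fComplexSketch { float x, y; };   // stand-in for the sample's fComplex

__global__ void pointwiseModulateAndScale(fComplexSketch *dst,
                                          const fComplexSketch *src,
                                          int n, float scale) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  fComplexSketch a = src[i];   // spectrum of the padded kernel
  fComplexSketch b = dst[i];   // spectrum of the padded data, overwritten in place
  fComplexSketch c;
  c.x = (a.x * b.x - a.y * b.y) * scale;   // real part of a*b, scaled by 1/(fftW*fftH)
  c.y = (a.y * b.x + a.x * b.y) * scale;   // imaginary part of a*b, scaled
  dst[i] = c;
}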
f013489876e56dd987ffe7306f8a51ab16b5b3a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ftl/operators/cuda/mls/multi_intensity.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <ftl/cuda/weighting.hpp> using ftl::cuda::MLSMultiIntensity; using cv::cuda::GpuMat; // ==== Multi image MLS ======================================================== __device__ inline float featureWeight(int f1, int f2) { const float w = (1.0f-(float(abs(f1 - f2)) / 255.0f)); return w*w*w; } __device__ inline float biasedLength(const float3 &Xi, const float3 &X) { float l = 0.0f; const float dx = Xi.x-X.x; l += 2.0f*dx*dx; const float dy = Xi.y-X.y; l += 2.0f*dy*dy; const float dz = Xi.z-X.z; l += dz*dz; return sqrt(l); } /* * Gather points for Moving Least Squares, from each source image */ template <int SEARCH_RADIUS, typename T> __global__ void mls_gather_intensity_kernel( const half4* __restrict__ normals_in, half4* __restrict__ normals_out, const float* __restrict__ depth_origin, const float* __restrict__ depth_in, const T* __restrict__ feature_origin, const T* __restrict__ feature_in, float4* __restrict__ centroid_out, float* __restrict__ contrib_out, float smoothing, float fsmoothing, float4x4 o_2_in, float4x4 in_2_o, float3x3 in_2_o33, ftl::rgbd::Camera camera_origin, ftl::rgbd::Camera camera_in, int npitch_out, int cpitch_out, int wpitch_out, int dpitch_o, int dpitch_i, int npitch_in, int fpitch_o, int fpitch_i ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || y < 0 || x >= camera_origin.width || y >= camera_origin.height) return; //float3 nX = make_float3(normals_out[y*npitch_out+x]); //float3 aX = make_float3(centroid_out[y*cpitch_out+x]); //float contrib = contrib_out[y*wpitch_out+x]; float3 nX = make_float3(0.0f, 0.0f, 0.0f); float3 aX = make_float3(0.0f, 0.0f, 0.0f); float contrib = 0.0f; float d0 = depth_origin[x+y*dpitch_o]; if (d0 <= camera_origin.minDepth || d0 >= camera_origin.maxDepth) return; const uchar2 feature1 = feature_origin[x+y*fpitch_o]; // TODO: Could the origin depth actually be averaged with depth in other // image? So as to not bias towards the original view? float3 X = camera_origin.screenToCam((int)(x),(int)(y),d0); const float3 camPos = o_2_in * X; const int2 s = camera_in.camToScreen<int2>(camPos); // Move point off of original surface //X = camera_origin.screenToCam((int)(x),(int)(y),d0-0.005f); // TODO: Could dynamically adjust the smoothing factors depending upon the // number of matches. Meaning, if lots of good local and feature matches // then be less likely to include poorer matches. Conversely, if only poor // non-local or feature distance matches, then increase search range. // Could also adapt smoothing parameters using variance or some other local // image measures. Or by just considering distance of the central projected // points as an indication of miss-alignment. Both spatial distance and // feature distance could be used to adjust parameters. // FIXME: For own image, need to do something different than the below. // Otherwise smoothing factors become 0. float spatial_smoothing = (depth_origin == depth_in) ? 0.005f : 0.03f; // 3cm default float hf_intensity_smoothing = (depth_origin == depth_in) ? 100.0f : 50.0f; float mean_smoothing = (depth_origin == depth_in) ? 
100.0f : 100.0f; if (depth_origin != depth_in && s.x >= 0 && s.x < camera_in.width && s.y >= 0 && s.y <= camera_in.height) { // Get depth at exact reprojection point const float d = depth_in[s.x+s.y*dpitch_i]; // Get feature at exact reprojection point const uchar2 feature2 = feature_in[s.x+s.y*fpitch_i]; if (d > camera_in.minDepth && d < camera_in.maxDepth) { spatial_smoothing = min(spatial_smoothing, smoothing * fabsf(camPos.z - d)); } hf_intensity_smoothing = smoothing * fabsf(float(feature2.x) - float(feature1.x)); //mean_smoothing = smoothing * fabsf(float(feature2.y) - float(feature1.y)); // Make start point the average of the two sources... const float3 reversePos = in_2_o * camera_in.screenToCam(s.x, s.y, d); X = X + (reversePos) / 2.0f; } // Make sure there is a minimum smoothing value spatial_smoothing = max(0.05f, spatial_smoothing); hf_intensity_smoothing = max(50.0f, hf_intensity_smoothing); //mean_smoothing = max(10.0f, mean_smoothing); // Check for neighbourhood symmetry and use to weight overall contribution float symx = 0.0f; float symy = 0.0f; // Neighbourhood for (int v=-SEARCH_RADIUS; v<=SEARCH_RADIUS; ++v) { for (int u=-SEARCH_RADIUS; u<=SEARCH_RADIUS; ++u) { const float d = (s.x+u >= 0 && s.x+u < camera_in.width && s.y+v >= 0 && s.y+v < camera_in.height) ? depth_in[s.x+u+(s.y+v)*dpitch_i] : 0.0f; if (d <= camera_in.minDepth || d >= camera_in.maxDepth) continue; // Point and normal of neighbour const float3 Xi = in_2_o * camera_in.screenToCam(s.x+u, s.y+v, d); const float3 Ni = make_float3(normals_in[s.x+u+(s.y+v)*npitch_in]); const uchar2 feature2 = feature_in[s.x+u+(s.y+v)*fpitch_i]; // Gauss approx weighting functions // Rule: spatially close and feature close is strong // Spatially far or feature far, then poor. // So take the minimum, must be close and feature close to get good value const float w_high_int = ftl::cuda::weighting(float(abs(int(feature1.x)-int(feature2.x))), hf_intensity_smoothing); const float w_mean_int = ftl::cuda::weighting(float(abs(int(feature1.y)-int(feature2.y))), mean_smoothing); const float w_space = ftl::cuda::spatialWeighting(X,Xi,spatial_smoothing); //const float w_space = ftl::cuda::weighting(biasedLength(Xi,X),spatial_smoothing); // TODO: Distance from cam squared // TODO: Angle from cam (dot of normal and ray) //const float w_lateral = ftl::cuda::weighting(sqrt(Xi.x*X.x + Xi.y*X.y), float(SEARCH_RADIUS)*camera_origin.fx/Xi.z); const float w = (length(Ni) > 0.0f) ? min(w_space, min(w_high_int, w_mean_int)) //w_space * w_high_int * w_mean_int // : 0.0f; // Mark as a symmetry contribution if (w > 0.0f) { if (u < 0) symx -= 1.0f; else if (u > 0) symx += 1.0f; if (v < 0) symy -= 1.0f; else if (v > 0) symy += 1.0f; } aX += Xi*w; nX += (in_2_o33 * Ni)*w; contrib += w; } } // Perfect symmetry means symx and symy == 0, therefore actual length can // be measure of asymmetry, so when inverted it can be used to weight result symx = fabsf(symx) / float(SEARCH_RADIUS); symy = fabsf(symy) / float(SEARCH_RADIUS); float l = 1.0f - sqrt(symx*symx+symy*symy); l = l*l; normals_out[y*npitch_out+x] = make_half4(make_float3(normals_out[y*npitch_out+x]) + nX*l, 0.0f); centroid_out[y*cpitch_out+x] = make_float4(make_float3(centroid_out[y*cpitch_out+x]) + aX*l, 0.0f); contrib_out[y*wpitch_out+x] = contrib_out[y*wpitch_out+x] + contrib*l; } /** * Convert accumulated values into estimate of depth and normals at pixel. 
*/ __global__ void mls_reduce_kernel_2( const float4* __restrict__ centroid, const half4* __restrict__ normals, const float* __restrict__ contrib_out, half4* __restrict__ normals_out, float* __restrict__ depth, uchar4* __restrict__ colour, ftl::rgbd::Camera camera, int npitch_in, int cpitch_in, int wpitch, int npitch, int dpitch, int cpitch ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= 0 && y >= 0 && x < camera.width && y < camera.height) { float3 nX = make_float3(normals[y*npitch_in+x]); float3 aX = make_float3(centroid[y*cpitch_in+x]); float contrib = contrib_out[y*wpitch+x]; //depth[x+y*dpitch] = X.z; //normals_out[x+y*npitch] = make_half4(0.0f, 0.0f, 0.0f, 0.0f); float d0 = depth[x+y*dpitch]; //depth[x+y*dpitch] = 0.0f; if (d0 <= camera.minDepth || d0 >= camera.maxDepth || contrib == 0.0f) return; float3 X = camera.screenToCam((int)(x),(int)(y),d0); nX /= contrib; // Weighted average normal aX /= contrib; // Weighted average point (centroid) // Signed-Distance Field function float fX = nX.x * (X.x - aX.x) + nX.y * (X.y - aX.y) + nX.z * (X.z - aX.z); // Calculate new point using SDF function to adjust depth (and position) X = X - nX * fX; depth[x+y*dpitch] = X.z; normals_out[x+y*npitch] = make_half4(nX / length(nX), 0.0f); if (colour) { int2 s = camera.camToScreen<int2>(X); float pd = min(1.0f, max(0.0f, X.z-d0) / 0.002f); float nd = min(1.0f, -min(0.0f, X.z-d0) / 0.002f); colour[x+y*cpitch] = (abs(s.x - x) > 1 || abs(s.y - y) > 1) ? make_uchar4(0,255,0,255) : make_uchar4( 255.0f - pd*255.0f, 255.0f - pd*255.0f - nd*255.0f, 255.0f - nd*255.0f, 255.0f ); } } } MLSMultiIntensity::MLSMultiIntensity(int radius) : radius_(radius) { } MLSMultiIntensity::~MLSMultiIntensity() { } void MLSMultiIntensity::prime( const GpuMat &depth_prime, const GpuMat &intensity_prime, const ftl::rgbd::Camera &cam_prime, const float4x4 &pose_prime, hipStream_t stream) { depth_prime_ = depth_prime; intensity_prime_ = intensity_prime; cam_prime_ = cam_prime; pose_prime_ = pose_prime; centroid_accum_.create(depth_prime.size(), CV_32FC4); normal_accum_.create(depth_prime.size(), CV_16FC4); weight_accum_.create(depth_prime.size(), CV_32F); cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream); // Reset buffers centroid_accum_.setTo(cv::Scalar(0,0,0,0), cvstream); weight_accum_.setTo(cv::Scalar(0), cvstream); normal_accum_.setTo(cv::Scalar(0,0,0,0), cvstream); } void MLSMultiIntensity::gatherPrime(float smoothing, hipStream_t stream) { // Can use a simpler kernel without pose transformations } void MLSMultiIntensity::gather( const GpuMat &depth_src, const GpuMat &normals_src, const GpuMat &intensity_src, const ftl::rgbd::Camera &cam_src, const float4x4 &pose_src, float smoothing, float fsmoothing, hipStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); float4x4 inv_pose_src = pose_src; inv_pose_src.invert(); float4x4 o_2_in = inv_pose_src * pose_prime_; float4x4 inv_pose_prime = pose_prime_; inv_pose_prime.invert(); float4x4 in_2_o = inv_pose_prime * pose_src; float3x3 in_2_o33 = inv_pose_prime.getFloat3x3() * pose_src.getFloat3x3(); hipLaunchKernelGGL(( mls_gather_intensity_kernel<3>), dim3(gridSize), dim3(blockSize), 0, stream, normals_src.ptr<half4>(), normal_accum_.ptr<half4>(), depth_prime_.ptr<float>(), depth_src.ptr<float>(), 
intensity_prime_.ptr<uchar2>(), intensity_src.ptr<uchar2>(), centroid_accum_.ptr<float4>(), weight_accum_.ptr<float>(), smoothing, fsmoothing, o_2_in, in_2_o, in_2_o33, cam_prime_, cam_src, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), depth_prime_.step1(), depth_src.step1(), normals_src.step1()/4, intensity_prime_.step1()/2, intensity_src.step1()/2 ); cudaSafeCall( hipGetLastError() ); } void MLSMultiIntensity::adjust( GpuMat &depth_out, GpuMat &normals_out, hipStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); normals_out.create(depth_prime_.size(), CV_16FC4); depth_out.create(depth_prime_.size(), CV_32F); // FIXME: Depth prime assumed to be same as depth out hipLaunchKernelGGL(( mls_reduce_kernel_2), dim3(gridSize), dim3(blockSize), 0, stream, centroid_accum_.ptr<float4>(), normal_accum_.ptr<half4>(), weight_accum_.ptr<float>(), normals_out.ptr<half4>(), depth_prime_.ptr<float>(), nullptr, cam_prime_, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), normals_out.step1()/4, depth_prime_.step1(), 0 ); cudaSafeCall( hipGetLastError() ); } void MLSMultiIntensity::adjust( GpuMat &depth_out, GpuMat &normals_out, GpuMat &colour_out, hipStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); normals_out.create(depth_prime_.size(), CV_16FC4); depth_out.create(depth_prime_.size(), CV_32F); // FIXME: Depth prime assumed to be same as depth out hipLaunchKernelGGL(( mls_reduce_kernel_2), dim3(gridSize), dim3(blockSize), 0, stream, centroid_accum_.ptr<float4>(), normal_accum_.ptr<half4>(), weight_accum_.ptr<float>(), normals_out.ptr<half4>(), depth_prime_.ptr<float>(), colour_out.ptr<uchar4>(), cam_prime_, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), normals_out.step1()/4, depth_prime_.step1(), colour_out.step1()/4 ); cudaSafeCall( hipGetLastError() ); } // ============================================================================= template <int RADIUS> __global__ void mean_subtract_kernel( const uchar* __restrict__ intensity, uchar2* __restrict__ contrast, int pitch, int cpitch, int width, int height ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= RADIUS && y >= RADIUS && x < width-RADIUS && y < height-RADIUS) { float mean = 0.0f; for (int v=-RADIUS; v<=RADIUS; ++v) { for (int u=-RADIUS; u<=RADIUS; ++u) { mean += float(intensity[x+u+(y+v)*pitch]); } } mean /= float((2*RADIUS+1)*(2*RADIUS+1)); float diff = float(intensity[x+y*pitch]) - mean; contrast[x+y*cpitch] = make_uchar2(max(0, min(254, int(diff)+127)), int(mean)); } } void ftl::cuda::mean_subtract( const cv::cuda::GpuMat &intensity, cv::cuda::GpuMat &contrast, int radius, hipStream_t stream ) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((intensity.cols + THREADS_X - 1)/THREADS_X, (intensity.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); contrast.create(intensity.size(), CV_8UC2); hipLaunchKernelGGL(( mean_subtract_kernel<3>), dim3(gridSize), dim3(blockSize), 0, stream, intensity.ptr<uchar>(), contrast.ptr<uchar2>(), 
intensity.step1(), contrast.step1()/2, intensity.cols, intensity.rows ); cudaSafeCall( hipGetLastError() ); }
f013489876e56dd987ffe7306f8a51ab16b5b3a9.cu
#include <ftl/operators/cuda/mls/multi_intensity.hpp> #include <opencv2/core/cuda_stream_accessor.hpp> #include <ftl/cuda/weighting.hpp> using ftl::cuda::MLSMultiIntensity; using cv::cuda::GpuMat; // ==== Multi image MLS ======================================================== __device__ inline float featureWeight(int f1, int f2) { const float w = (1.0f-(float(abs(f1 - f2)) / 255.0f)); return w*w*w; } __device__ inline float biasedLength(const float3 &Xi, const float3 &X) { float l = 0.0f; const float dx = Xi.x-X.x; l += 2.0f*dx*dx; const float dy = Xi.y-X.y; l += 2.0f*dy*dy; const float dz = Xi.z-X.z; l += dz*dz; return sqrt(l); } /* * Gather points for Moving Least Squares, from each source image */ template <int SEARCH_RADIUS, typename T> __global__ void mls_gather_intensity_kernel( const half4* __restrict__ normals_in, half4* __restrict__ normals_out, const float* __restrict__ depth_origin, const float* __restrict__ depth_in, const T* __restrict__ feature_origin, const T* __restrict__ feature_in, float4* __restrict__ centroid_out, float* __restrict__ contrib_out, float smoothing, float fsmoothing, float4x4 o_2_in, float4x4 in_2_o, float3x3 in_2_o33, ftl::rgbd::Camera camera_origin, ftl::rgbd::Camera camera_in, int npitch_out, int cpitch_out, int wpitch_out, int dpitch_o, int dpitch_i, int npitch_in, int fpitch_o, int fpitch_i ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < 0 || y < 0 || x >= camera_origin.width || y >= camera_origin.height) return; //float3 nX = make_float3(normals_out[y*npitch_out+x]); //float3 aX = make_float3(centroid_out[y*cpitch_out+x]); //float contrib = contrib_out[y*wpitch_out+x]; float3 nX = make_float3(0.0f, 0.0f, 0.0f); float3 aX = make_float3(0.0f, 0.0f, 0.0f); float contrib = 0.0f; float d0 = depth_origin[x+y*dpitch_o]; if (d0 <= camera_origin.minDepth || d0 >= camera_origin.maxDepth) return; const uchar2 feature1 = feature_origin[x+y*fpitch_o]; // TODO: Could the origin depth actually be averaged with depth in other // image? So as to not bias towards the original view? float3 X = camera_origin.screenToCam((int)(x),(int)(y),d0); const float3 camPos = o_2_in * X; const int2 s = camera_in.camToScreen<int2>(camPos); // Move point off of original surface //X = camera_origin.screenToCam((int)(x),(int)(y),d0-0.005f); // TODO: Could dynamically adjust the smoothing factors depending upon the // number of matches. Meaning, if lots of good local and feature matches // then be less likely to include poorer matches. Conversely, if only poor // non-local or feature distance matches, then increase search range. // Could also adapt smoothing parameters using variance or some other local // image measures. Or by just considering distance of the central projected // points as an indication of miss-alignment. Both spatial distance and // feature distance could be used to adjust parameters. // FIXME: For own image, need to do something different than the below. // Otherwise smoothing factors become 0. float spatial_smoothing = (depth_origin == depth_in) ? 0.005f : 0.03f; // 3cm default float hf_intensity_smoothing = (depth_origin == depth_in) ? 100.0f : 50.0f; float mean_smoothing = (depth_origin == depth_in) ? 
100.0f : 100.0f; if (depth_origin != depth_in && s.x >= 0 && s.x < camera_in.width && s.y >= 0 && s.y <= camera_in.height) { // Get depth at exact reprojection point const float d = depth_in[s.x+s.y*dpitch_i]; // Get feature at exact reprojection point const uchar2 feature2 = feature_in[s.x+s.y*fpitch_i]; if (d > camera_in.minDepth && d < camera_in.maxDepth) { spatial_smoothing = min(spatial_smoothing, smoothing * fabsf(camPos.z - d)); } hf_intensity_smoothing = smoothing * fabsf(float(feature2.x) - float(feature1.x)); //mean_smoothing = smoothing * fabsf(float(feature2.y) - float(feature1.y)); // Make start point the average of the two sources... const float3 reversePos = in_2_o * camera_in.screenToCam(s.x, s.y, d); X = X + (reversePos) / 2.0f; } // Make sure there is a minimum smoothing value spatial_smoothing = max(0.05f, spatial_smoothing); hf_intensity_smoothing = max(50.0f, hf_intensity_smoothing); //mean_smoothing = max(10.0f, mean_smoothing); // Check for neighbourhood symmetry and use to weight overall contribution float symx = 0.0f; float symy = 0.0f; // Neighbourhood for (int v=-SEARCH_RADIUS; v<=SEARCH_RADIUS; ++v) { for (int u=-SEARCH_RADIUS; u<=SEARCH_RADIUS; ++u) { const float d = (s.x+u >= 0 && s.x+u < camera_in.width && s.y+v >= 0 && s.y+v < camera_in.height) ? depth_in[s.x+u+(s.y+v)*dpitch_i] : 0.0f; if (d <= camera_in.minDepth || d >= camera_in.maxDepth) continue; // Point and normal of neighbour const float3 Xi = in_2_o * camera_in.screenToCam(s.x+u, s.y+v, d); const float3 Ni = make_float3(normals_in[s.x+u+(s.y+v)*npitch_in]); const uchar2 feature2 = feature_in[s.x+u+(s.y+v)*fpitch_i]; // Gauss approx weighting functions // Rule: spatially close and feature close is strong // Spatially far or feature far, then poor. // So take the minimum, must be close and feature close to get good value const float w_high_int = ftl::cuda::weighting(float(abs(int(feature1.x)-int(feature2.x))), hf_intensity_smoothing); const float w_mean_int = ftl::cuda::weighting(float(abs(int(feature1.y)-int(feature2.y))), mean_smoothing); const float w_space = ftl::cuda::spatialWeighting(X,Xi,spatial_smoothing); //const float w_space = ftl::cuda::weighting(biasedLength(Xi,X),spatial_smoothing); // TODO: Distance from cam squared // TODO: Angle from cam (dot of normal and ray) //const float w_lateral = ftl::cuda::weighting(sqrt(Xi.x*X.x + Xi.y*X.y), float(SEARCH_RADIUS)*camera_origin.fx/Xi.z); const float w = (length(Ni) > 0.0f) ? min(w_space, min(w_high_int, w_mean_int)) //w_space * w_high_int * w_mean_int // : 0.0f; // Mark as a symmetry contribution if (w > 0.0f) { if (u < 0) symx -= 1.0f; else if (u > 0) symx += 1.0f; if (v < 0) symy -= 1.0f; else if (v > 0) symy += 1.0f; } aX += Xi*w; nX += (in_2_o33 * Ni)*w; contrib += w; } } // Perfect symmetry means symx and symy == 0, therefore actual length can // be measure of asymmetry, so when inverted it can be used to weight result symx = fabsf(symx) / float(SEARCH_RADIUS); symy = fabsf(symy) / float(SEARCH_RADIUS); float l = 1.0f - sqrt(symx*symx+symy*symy); l = l*l; normals_out[y*npitch_out+x] = make_half4(make_float3(normals_out[y*npitch_out+x]) + nX*l, 0.0f); centroid_out[y*cpitch_out+x] = make_float4(make_float3(centroid_out[y*cpitch_out+x]) + aX*l, 0.0f); contrib_out[y*wpitch_out+x] = contrib_out[y*wpitch_out+x] + contrib*l; } /** * Convert accumulated values into estimate of depth and normals at pixel. 
*/ __global__ void mls_reduce_kernel_2( const float4* __restrict__ centroid, const half4* __restrict__ normals, const float* __restrict__ contrib_out, half4* __restrict__ normals_out, float* __restrict__ depth, uchar4* __restrict__ colour, ftl::rgbd::Camera camera, int npitch_in, int cpitch_in, int wpitch, int npitch, int dpitch, int cpitch ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= 0 && y >= 0 && x < camera.width && y < camera.height) { float3 nX = make_float3(normals[y*npitch_in+x]); float3 aX = make_float3(centroid[y*cpitch_in+x]); float contrib = contrib_out[y*wpitch+x]; //depth[x+y*dpitch] = X.z; //normals_out[x+y*npitch] = make_half4(0.0f, 0.0f, 0.0f, 0.0f); float d0 = depth[x+y*dpitch]; //depth[x+y*dpitch] = 0.0f; if (d0 <= camera.minDepth || d0 >= camera.maxDepth || contrib == 0.0f) return; float3 X = camera.screenToCam((int)(x),(int)(y),d0); nX /= contrib; // Weighted average normal aX /= contrib; // Weighted average point (centroid) // Signed-Distance Field function float fX = nX.x * (X.x - aX.x) + nX.y * (X.y - aX.y) + nX.z * (X.z - aX.z); // Calculate new point using SDF function to adjust depth (and position) X = X - nX * fX; depth[x+y*dpitch] = X.z; normals_out[x+y*npitch] = make_half4(nX / length(nX), 0.0f); if (colour) { int2 s = camera.camToScreen<int2>(X); float pd = min(1.0f, max(0.0f, X.z-d0) / 0.002f); float nd = min(1.0f, -min(0.0f, X.z-d0) / 0.002f); colour[x+y*cpitch] = (abs(s.x - x) > 1 || abs(s.y - y) > 1) ? make_uchar4(0,255,0,255) : make_uchar4( 255.0f - pd*255.0f, 255.0f - pd*255.0f - nd*255.0f, 255.0f - nd*255.0f, 255.0f ); } } } MLSMultiIntensity::MLSMultiIntensity(int radius) : radius_(radius) { } MLSMultiIntensity::~MLSMultiIntensity() { } void MLSMultiIntensity::prime( const GpuMat &depth_prime, const GpuMat &intensity_prime, const ftl::rgbd::Camera &cam_prime, const float4x4 &pose_prime, cudaStream_t stream) { depth_prime_ = depth_prime; intensity_prime_ = intensity_prime; cam_prime_ = cam_prime; pose_prime_ = pose_prime; centroid_accum_.create(depth_prime.size(), CV_32FC4); normal_accum_.create(depth_prime.size(), CV_16FC4); weight_accum_.create(depth_prime.size(), CV_32F); cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream); // Reset buffers centroid_accum_.setTo(cv::Scalar(0,0,0,0), cvstream); weight_accum_.setTo(cv::Scalar(0), cvstream); normal_accum_.setTo(cv::Scalar(0,0,0,0), cvstream); } void MLSMultiIntensity::gatherPrime(float smoothing, cudaStream_t stream) { // Can use a simpler kernel without pose transformations } void MLSMultiIntensity::gather( const GpuMat &depth_src, const GpuMat &normals_src, const GpuMat &intensity_src, const ftl::rgbd::Camera &cam_src, const float4x4 &pose_src, float smoothing, float fsmoothing, cudaStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); float4x4 inv_pose_src = pose_src; inv_pose_src.invert(); float4x4 o_2_in = inv_pose_src * pose_prime_; float4x4 inv_pose_prime = pose_prime_; inv_pose_prime.invert(); float4x4 in_2_o = inv_pose_prime * pose_src; float3x3 in_2_o33 = inv_pose_prime.getFloat3x3() * pose_src.getFloat3x3(); mls_gather_intensity_kernel<3><<<gridSize, blockSize, 0, stream>>>( normals_src.ptr<half4>(), normal_accum_.ptr<half4>(), depth_prime_.ptr<float>(), depth_src.ptr<float>(), intensity_prime_.ptr<uchar2>(), 
intensity_src.ptr<uchar2>(), centroid_accum_.ptr<float4>(), weight_accum_.ptr<float>(), smoothing, fsmoothing, o_2_in, in_2_o, in_2_o33, cam_prime_, cam_src, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), depth_prime_.step1(), depth_src.step1(), normals_src.step1()/4, intensity_prime_.step1()/2, intensity_src.step1()/2 ); cudaSafeCall( cudaGetLastError() ); } void MLSMultiIntensity::adjust( GpuMat &depth_out, GpuMat &normals_out, cudaStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); normals_out.create(depth_prime_.size(), CV_16FC4); depth_out.create(depth_prime_.size(), CV_32F); // FIXME: Depth prime assumed to be same as depth out mls_reduce_kernel_2<<<gridSize, blockSize, 0, stream>>>( centroid_accum_.ptr<float4>(), normal_accum_.ptr<half4>(), weight_accum_.ptr<float>(), normals_out.ptr<half4>(), depth_prime_.ptr<float>(), nullptr, cam_prime_, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), normals_out.step1()/4, depth_prime_.step1(), 0 ); cudaSafeCall( cudaGetLastError() ); } void MLSMultiIntensity::adjust( GpuMat &depth_out, GpuMat &normals_out, GpuMat &colour_out, cudaStream_t stream) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((depth_prime_.cols + THREADS_X - 1)/THREADS_X, (depth_prime_.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); normals_out.create(depth_prime_.size(), CV_16FC4); depth_out.create(depth_prime_.size(), CV_32F); // FIXME: Depth prime assumed to be same as depth out mls_reduce_kernel_2<<<gridSize, blockSize, 0, stream>>>( centroid_accum_.ptr<float4>(), normal_accum_.ptr<half4>(), weight_accum_.ptr<float>(), normals_out.ptr<half4>(), depth_prime_.ptr<float>(), colour_out.ptr<uchar4>(), cam_prime_, normal_accum_.step1()/4, centroid_accum_.step1()/4, weight_accum_.step1(), normals_out.step1()/4, depth_prime_.step1(), colour_out.step1()/4 ); cudaSafeCall( cudaGetLastError() ); } // ============================================================================= template <int RADIUS> __global__ void mean_subtract_kernel( const uchar* __restrict__ intensity, uchar2* __restrict__ contrast, int pitch, int cpitch, int width, int height ) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; if (x >= RADIUS && y >= RADIUS && x < width-RADIUS && y < height-RADIUS) { float mean = 0.0f; for (int v=-RADIUS; v<=RADIUS; ++v) { for (int u=-RADIUS; u<=RADIUS; ++u) { mean += float(intensity[x+u+(y+v)*pitch]); } } mean /= float((2*RADIUS+1)*(2*RADIUS+1)); float diff = float(intensity[x+y*pitch]) - mean; contrast[x+y*cpitch] = make_uchar2(max(0, min(254, int(diff)+127)), int(mean)); } } void ftl::cuda::mean_subtract( const cv::cuda::GpuMat &intensity, cv::cuda::GpuMat &contrast, int radius, cudaStream_t stream ) { static constexpr int THREADS_X = 8; static constexpr int THREADS_Y = 8; const dim3 gridSize((intensity.cols + THREADS_X - 1)/THREADS_X, (intensity.rows + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); contrast.create(intensity.size(), CV_8UC2); mean_subtract_kernel<3><<<gridSize, blockSize, 0, stream>>>( intensity.ptr<uchar>(), contrast.ptr<uchar2>(), intensity.step1(), contrast.step1()/2, intensity.cols, intensity.rows ); cudaSafeCall( cudaGetLastError() ); }
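mls_reduce_kernel_2 in the pair above turns the accumulated weighted centroid and normal into a depth correction by evaluating a signed-distance function and sliding the point along the (non-renormalised) average normal. A scalar host-side sketch of just that projection step follows; Vec3 and mlsProject are illustrative names, and the CUDA float3 helpers are deliberately avoided so the snippet stands alone.

struct Vec3 { float x, y, z; };

// f(X) = n . (X - a); the corrected point is X' = X - n * f(X), which is the
// per-pixel arithmetic done in mls_reduce_kernel_2 after dividing by the weights.
Vec3 mlsProject(Vec3 X, Vec3 n, Vec3 a) {
  float fX = n.x * (X.x - a.x) + n.y * (X.y - a.y) + n.z * (X.z - a.z);
  Vec3 out;
  out.x = X.x - n.x * fX;
  out.y = X.y - n.y * fX;
  out.z = X.z - n.z * fX;
  return out;
}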
5afe03eb47115311403a10a8c64be17db7ed0004.hip
// !!! This is a file automatically generated by hipify!!! #ifndef _SLLN_KERNEL_CH_ #define _SLLN_KERNEL_CH_ #include <helper_functions.h> #include <helper_math.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #include <math.h> #include <string> #include <typeinfo> #include <vector> __device__ float *d_gray, *d_gray_noise; __device__ float3 *d_color; //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host // call returns an error #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(EXIT_FAILURE); } } __global__ void remosaic(float3 *id, float *od, int width, int height) { // assume rggb bayer pattern const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; const int tid = yIndex + xIndex * height; int i = (2-((xIndex % 2) + (yIndex % 2))); switch (i) { case 0: od[yIndex + xIndex * height] = id[tid].x; break; case 1: od[yIndex + xIndex * height] = id[tid].y; break; case 2: od[yIndex + xIndex * height] = id[tid].z; break; } } __global__ void apply_slln(float *id, float *od, float *numbers, int width, int height, float ill_mult, float noise) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; int ind = yIndex + xIndex * height; hiprandState_t state; hiprand_init(clock64(), ind, 0, &state); numbers[ind] = hiprand_normal(&state); od[ind] = min(1.0f,max(0.0f,id[ind] * ill_mult + numbers[ind] * noise)); } __global__ void demosaic(float *id, float3 *od, int width, int height) { // id is single channel image // od is 3-channel RGB image const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; int o_i_j = yIndex + height*xIndex; int o_1i_j = (yIndex-1) + height*xIndex; int o_i1_j = (yIndex+1) + height*xIndex; int o_2i_j = (yIndex-2) + height*xIndex; int o_i2_j = (yIndex+2) + height*xIndex; int o_i_1j = yIndex + height*(xIndex-1); int o_i_j1 = yIndex + height*(xIndex+1); int o_i_2j = yIndex + height*(xIndex-2); int o_i_j2 = yIndex + height*(xIndex+2); int o_1i_1j = (yIndex-1) + height*(xIndex-1); int o_i1_1j = (yIndex+1) + height*(xIndex-1); int o_1i_j1 = (yIndex-1) + height*(xIndex+1); int o_i1_j1 = (yIndex+1) + height*(xIndex+1); int _1y = (yIndex-1) >= 0; int y1 = (yIndex+1) < height; int _2y = (yIndex-2) >= 0; int y2 = (yIndex+2) < height; int _1x = (xIndex-1) >= 0; int x1 = (xIndex+1) < width; int _2x = (xIndex-2) >= 0; int x2 = (xIndex+2) < width; float id_i_j = id[o_i_j]; float id_1i_j = id[o_1i_j] * _1y; float id_i_1j = id[o_i_1j] * _1x; float id_i1_j = id[o_i1_j] * y1; float id_i_j1 = id[o_i_j1] * x1; float id_2i_j = id[o_2i_j] * _2y; float id_i_2j = id[o_i_2j] * _2x; float id_i2_j = id[o_i2_j] * y2; float id_i_j2 = id[o_i_j2] * x2; float id_1i_1j = id[o_1i_1j] * (_1y & _1x); float id_i1_1j = id[o_i1_1j] * (y1 & _1x); float id_1i_j1 = id[o_1i_j1] * (_1y & 
x1); float id_i1_j1 = id[o_i1_j1] * (y1 & x1); const float Fij = id_i_j; //symmetric 4,2,-1 response - cross const float R1 = (4*id_i_j + 2*(id_1i_j + id_i_1j + id_i1_j + id_i_j1) - id_2i_j - id_i2_j - id_i_2j - id_i_j2) / (4 + 2*(_1y + _1x + y1 + x1) - _2y - y2 - _2x - x2); //left-right symmetric response - with .5 + height*1 + height*4 + height*5 - theta const float R2 = ( 8*(id_1i_j + id_i1_j) +10*id_i_j + id_i_2j + id_i_j2 - 2*((id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) + id_2i_j + id_i2_j) ) / (8*(_1y + y1) +10 + _2x + x2 - 2*(((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1)) + _2y + y2)); //top-bottom symmetric response - with .5 + height*1 + height*4 + height*5 - phi const float R3 = ( 8*(id_i_1j + id_i_j1) +10*id_i_j + id_2i_j + id_i2_j - 2*((id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) + id_i_2j + id_i_j2) ) / (8*(_1x + x1) +10 + _2y + y2 - 2*(((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1)) + _2x + x2)); //symmetric 3/2s response - checker const float R4 = ( 12*id_i_j - 3*(id_2i_j + id_i2_j + id_i_2j + id_i_j2) + 4*(id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) ) / (12 - 3*(_2y + y2 + _2x + x2) + 4*((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1))); const float G_at_red_or_blue = R1; const float R_at_G_in_red = R2; const float B_at_G_in_blue = R2; const float R_at_G_in_blue = R3; const float B_at_G_in_red = R3; const float R_at_B = R4; const float B_at_R = R4; //RGGB -> RedXY = (0, 0), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (1, 1) //GRBG -> RedXY = (1, 0), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (0, 1) //GBRG -> RedXY = (0, 1), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (1, 0) //BGGR -> RedXY = (1, 1), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (0, 0) const int r_mod_2 = xIndex & 1; const int c_mod_2 = yIndex & 1; #define is_rggb (true) #define is_grbg (false) #define is_gbrg (false) #define is_bggr (false) const int red_col = is_grbg | is_bggr; const int red_row = is_gbrg | is_bggr; const int blue_col = 1 - red_col; const int blue_row = 1 - red_row; const int in_red_row = r_mod_2 == red_row; const int in_blue_row = r_mod_2 == blue_row; const int is_red_pixel = (r_mod_2 == red_row) & (c_mod_2 == red_col); const int is_blue_pixel = (r_mod_2 == blue_row) & (c_mod_2 == blue_col); const int is_green_pixel = !(is_red_pixel | is_blue_pixel); assert(is_green_pixel + is_blue_pixel + is_red_pixel == 1); assert(in_red_row + in_blue_row == 1); //at R locations: R is original //at B locations it is the 3/2s symmetric response //at G in red rows it is the left-right symmmetric with 4s //at G in blue rows it is the top-bottom symmetric with 4s float red = Fij * is_red_pixel + R_at_B * is_blue_pixel + R_at_G_in_red * (is_green_pixel & in_red_row) + R_at_G_in_blue * (is_green_pixel & in_blue_row); //at B locations: B is original //at R locations it is the 3/2s symmetric response //at G in red rows it is the top-bottom symmmetric with 4s //at G in blue rows it is the left-right symmetric with 4s float blue = Fij * is_blue_pixel + B_at_R * is_red_pixel + B_at_G_in_red * (is_green_pixel & in_red_row) + B_at_G_in_blue * (is_green_pixel & in_blue_row); //at G locations: G is original //at R locations: symmetric 4,2,-1 //at B locations: symmetric 4,2,-1 float green = Fij * is_green_pixel + G_at_red_or_blue * (!is_green_pixel); od[o_i_j].x = blue; od[o_i_j].y = green; od[o_i_j].z = red; } extern "C" void initSLLN(int width, int height) { checkCudaErrors(hipMalloc((void **)&d_color, width * sizeof(float3) * height)); checkCudaErrors(hipMalloc((void **)&d_gray, width * 
sizeof(float) * height)); checkCudaErrors(hipMalloc((void **)&d_gray_noise, width * sizeof(float) * height)); checkCudaErrors(hipDeviceSynchronize()); } extern "C" void endSLLN() { checkCudaErrors(hipFree(d_color)); checkCudaErrors(hipFree(d_gray)); checkCudaErrors(hipFree(d_gray_noise)); checkCudaErrors(hipDeviceSynchronize()); } extern "C" void applySLLN(const float3 &input, float3 &output, int block_size, int width, int height, float ill, float noise) { const int colorBytes = width * sizeof(float3) * height; checkCudaErrors(hipMemcpy(d_color, &input, colorBytes, hipMemcpyHostToDevice)); // Specify a reasonable block size const dim3 block(block_size, block_size); const dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y); hipLaunchKernelGGL(( remosaic), dim3(grid), dim3(block), 0, 0, d_color, d_gray, width, height); // Synchronize to check for any kernel launch errors checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( apply_slln), dim3(grid), dim3(block), 0, 0, d_gray, d_gray, d_gray_noise, width, height, ill, noise); checkCudaErrors(hipDeviceSynchronize()); hipLaunchKernelGGL(( demosaic), dim3(grid), dim3(block), 0, 0, d_gray, d_color, width, height); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(&output, d_color, colorBytes, hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); // checkCudaErrors(hipFree(d_color)); // checkCudaErrors(hipFree(d_gray)); return; } #endif // #ifndef _SEQSLAM_KERNEL_H_
5afe03eb47115311403a10a8c64be17db7ed0004.cu
#ifndef _SLLN_KERNEL_CH_ #define _SLLN_KERNEL_CH_ #include <helper_functions.h> #include <helper_math.h> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <curand_kernel.h> #include <curand_normal.h> #include <cuda_runtime.h> #include <math.h> #include <string> #include <typeinfo> #include <vector> __device__ float *d_gray, *d_gray_noise; __device__ float3 *d_color; //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host // call returns an error #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } __global__ void remosaic(float3 *id, float *od, int width, int height) { // assume rggb bayer pattern const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; const int tid = yIndex + xIndex * height; int i = (2-((xIndex % 2) + (yIndex % 2))); switch (i) { case 0: od[yIndex + xIndex * height] = id[tid].x; break; case 1: od[yIndex + xIndex * height] = id[tid].y; break; case 2: od[yIndex + xIndex * height] = id[tid].z; break; } } __global__ void apply_slln(float *id, float *od, float *numbers, int width, int height, float ill_mult, float noise) { const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; int ind = yIndex + xIndex * height; curandState state; curand_init(clock64(), ind, 0, &state); numbers[ind] = curand_normal(&state); od[ind] = min(1.0f,max(0.0f,id[ind] * ill_mult + numbers[ind] * noise)); } __global__ void demosaic(float *id, float3 *od, int width, int height) { // id is single channel image // od is 3-channel RGB image const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex >= width) || (yIndex >= height)) return; int o_i_j = yIndex + height*xIndex; int o_1i_j = (yIndex-1) + height*xIndex; int o_i1_j = (yIndex+1) + height*xIndex; int o_2i_j = (yIndex-2) + height*xIndex; int o_i2_j = (yIndex+2) + height*xIndex; int o_i_1j = yIndex + height*(xIndex-1); int o_i_j1 = yIndex + height*(xIndex+1); int o_i_2j = yIndex + height*(xIndex-2); int o_i_j2 = yIndex + height*(xIndex+2); int o_1i_1j = (yIndex-1) + height*(xIndex-1); int o_i1_1j = (yIndex+1) + height*(xIndex-1); int o_1i_j1 = (yIndex-1) + height*(xIndex+1); int o_i1_j1 = (yIndex+1) + height*(xIndex+1); int _1y = (yIndex-1) >= 0; int y1 = (yIndex+1) < height; int _2y = (yIndex-2) >= 0; int y2 = (yIndex+2) < height; int _1x = (xIndex-1) >= 0; int x1 = (xIndex+1) < width; int _2x = (xIndex-2) >= 0; int x2 = (xIndex+2) < width; float id_i_j = id[o_i_j]; float id_1i_j = id[o_1i_j] * _1y; float id_i_1j = id[o_i_1j] * _1x; float id_i1_j = id[o_i1_j] * y1; float id_i_j1 = id[o_i_j1] * x1; float id_2i_j = id[o_2i_j] * _2y; float id_i_2j = id[o_i_2j] * _2x; float id_i2_j = id[o_i2_j] * y2; float id_i_j2 = id[o_i_j2] * x2; float id_1i_1j = id[o_1i_1j] * (_1y & _1x); float id_i1_1j = id[o_i1_1j] * (y1 & _1x); float id_1i_j1 = id[o_1i_j1] * (_1y & x1); float id_i1_j1 = id[o_i1_j1] * (y1 & x1); const float Fij = id_i_j; //symmetric 
4,2,-1 response - cross const float R1 = (4*id_i_j + 2*(id_1i_j + id_i_1j + id_i1_j + id_i_j1) - id_2i_j - id_i2_j - id_i_2j - id_i_j2) / (4 + 2*(_1y + _1x + y1 + x1) - _2y - y2 - _2x - x2); //left-right symmetric response - with .5 + height*1 + height*4 + height*5 - theta const float R2 = ( 8*(id_1i_j + id_i1_j) +10*id_i_j + id_i_2j + id_i_j2 - 2*((id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) + id_2i_j + id_i2_j) ) / (8*(_1y + y1) +10 + _2x + x2 - 2*(((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1)) + _2y + y2)); //top-bottom symmetric response - with .5 + height*1 + height*4 + height*5 - phi const float R3 = ( 8*(id_i_1j + id_i_j1) +10*id_i_j + id_2i_j + id_i2_j - 2*((id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) + id_i_2j + id_i_j2) ) / (8*(_1x + x1) +10 + _2y + y2 - 2*(((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1)) + _2x + x2)); //symmetric 3/2s response - checker const float R4 = ( 12*id_i_j - 3*(id_2i_j + id_i2_j + id_i_2j + id_i_j2) + 4*(id_1i_1j + id_i1_1j + id_1i_j1 + id_i1_j1) ) / (12 - 3*(_2y + y2 + _2x + x2) + 4*((_1y & _1x) + (y1 & _1x) + (_1y & x1) + (y1 & x1))); const float G_at_red_or_blue = R1; const float R_at_G_in_red = R2; const float B_at_G_in_blue = R2; const float R_at_G_in_blue = R3; const float B_at_G_in_red = R3; const float R_at_B = R4; const float B_at_R = R4; //RGGB -> RedXY = (0, 0), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (1, 1) //GRBG -> RedXY = (1, 0), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (0, 1) //GBRG -> RedXY = (0, 1), GreenXY1 = (0, 0), GreenXY2 = (1, 1), BlueXY = (1, 0) //BGGR -> RedXY = (1, 1), GreenXY1 = (1, 0), GreenXY2 = (0, 1), BlueXY = (0, 0) const int r_mod_2 = xIndex & 1; const int c_mod_2 = yIndex & 1; #define is_rggb (true) #define is_grbg (false) #define is_gbrg (false) #define is_bggr (false) const int red_col = is_grbg | is_bggr; const int red_row = is_gbrg | is_bggr; const int blue_col = 1 - red_col; const int blue_row = 1 - red_row; const int in_red_row = r_mod_2 == red_row; const int in_blue_row = r_mod_2 == blue_row; const int is_red_pixel = (r_mod_2 == red_row) & (c_mod_2 == red_col); const int is_blue_pixel = (r_mod_2 == blue_row) & (c_mod_2 == blue_col); const int is_green_pixel = !(is_red_pixel | is_blue_pixel); assert(is_green_pixel + is_blue_pixel + is_red_pixel == 1); assert(in_red_row + in_blue_row == 1); //at R locations: R is original //at B locations it is the 3/2s symmetric response //at G in red rows it is the left-right symmmetric with 4s //at G in blue rows it is the top-bottom symmetric with 4s float red = Fij * is_red_pixel + R_at_B * is_blue_pixel + R_at_G_in_red * (is_green_pixel & in_red_row) + R_at_G_in_blue * (is_green_pixel & in_blue_row); //at B locations: B is original //at R locations it is the 3/2s symmetric response //at G in red rows it is the top-bottom symmmetric with 4s //at G in blue rows it is the left-right symmetric with 4s float blue = Fij * is_blue_pixel + B_at_R * is_red_pixel + B_at_G_in_red * (is_green_pixel & in_red_row) + B_at_G_in_blue * (is_green_pixel & in_blue_row); //at G locations: G is original //at R locations: symmetric 4,2,-1 //at B locations: symmetric 4,2,-1 float green = Fij * is_green_pixel + G_at_red_or_blue * (!is_green_pixel); od[o_i_j].x = blue; od[o_i_j].y = green; od[o_i_j].z = red; } extern "C" void initSLLN(int width, int height) { checkCudaErrors(cudaMalloc((void **)&d_color, width * sizeof(float3) * height)); checkCudaErrors(cudaMalloc((void **)&d_gray, width * sizeof(float) * height)); checkCudaErrors(cudaMalloc((void **)&d_gray_noise, width * 
sizeof(float) * height)); checkCudaErrors(cudaDeviceSynchronize()); } extern "C" void endSLLN() { checkCudaErrors(cudaFree(d_color)); checkCudaErrors(cudaFree(d_gray)); checkCudaErrors(cudaFree(d_gray_noise)); checkCudaErrors(cudaDeviceSynchronize()); } extern "C" void applySLLN(const float3 &input, float3 &output, int block_size, int width, int height, float ill, float noise) { const int colorBytes = width * sizeof(float3) * height; checkCudaErrors(cudaMemcpy(d_color, &input, colorBytes, cudaMemcpyHostToDevice)); // Specify a reasonable block size const dim3 block(block_size, block_size); const dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y); remosaic<<<grid, block>>>(d_color, d_gray, width, height); // Synchronize to check for any kernel launch errors checkCudaErrors(cudaDeviceSynchronize()); apply_slln<<<grid, block>>>(d_gray, d_gray, d_gray_noise, width, height, ill, noise); checkCudaErrors(cudaDeviceSynchronize()); demosaic<<<grid, block>>>(d_gray, d_color, width, height); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(&output, d_color, colorBytes, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); // checkCudaErrors(cudaFree(d_color)); // checkCudaErrors(cudaFree(d_gray)); return; } #endif // #ifndef _SLLN_KERNEL_CH_
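The three extern "C" entry points in the record above form a small low-light degradation pipeline: remosaic the RGB frame to an RGGB raw image, scale its brightness and add Gaussian read noise, then demosaic back to RGB. The snippet below is a minimal host-side driver sketch, not part of the original record; the frame size, block size, ill/noise values, and the way the float3 buffers are filled are assumptions, and the buffers must already use the y + x*height layout the kernels index with.

#include <vector>
#include <cuda_runtime.h>  // float3

// Prototypes as defined in the record above.
extern "C" void initSLLN(int width, int height);
extern "C" void applySLLN(const float3 &input, float3 &output, int block_size, int width, int height, float ill, float noise);
extern "C" void endSLLN();

int main() {
    const int width = 640, height = 480;          // assumed frame size
    std::vector<float3> frame(width * height);    // filled elsewhere, stored as y + x*height
    std::vector<float3> degraded(width * height);

    initSLLN(width, height);                      // allocates the persistent device buffers
    // ill < 1 darkens the frame; noise is the std-dev of the additive Gaussian term in apply_slln.
    applySLLN(frame[0], degraded[0], /*block_size=*/16, width, height, /*ill=*/0.25f, /*noise=*/0.02f);
    endSLLN();                                    // frees the device buffers
    return 0;
}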
27ab560bd384edaf3f2a7da038684621c46ae8e6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "weight_vector_bound_cuda_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <hip/hip_runtime.h> #include <boost/format.hpp> #include <stack> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, float>& layer_to_dropout_rate_map, const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); for(std::map<unsigned int, weight_vector_bound>::const_iterator it = this->layer_to_weight_vector_bound_map.begin(); it != this->layer_to_weight_vector_bound_map.end(); 
++it) { unsigned int layer_id = it->first; if (layer_id < testing_layer_count) throw neural_network_exception((boost::format("Weight vector bound is specified fo layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str()); weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config))); } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<network_data_smart_ptr>& learning_rate_vector_list, std::vector<network_data_smart_ptr>& data_list) { std::vector<testing_result_smart_ptr> res; reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size()); if (updater_entry_count == 0) return res; for(unsigned int i = 0; i < learning_rate_vector_list.size(); ++i) res.push_back(testing_result_smart_ptr(new testing_result(ef))); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = enqueue_get_data(data_list, *command_stream); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = enqueue_get_learning_rate(learning_rate_vector_list, *command_stream); buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * updater_entry_count); // error buffer if 
(!random_uniform_list.empty()) buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(updater_entry_count * sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(hipMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), hipMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } 
std::vector<cuda_linear_buffer_device_smart_ptr> output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers; for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count))); cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float))); float * output = *output_host_buf; // zero mse cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, updater_entry_count, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( convert_compacted_to_raw_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, *command_stream, *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(hipMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), hipMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type 
%1%") % type_code).str()); // Run ann { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id) { std::stack<unsigned int> offset_list; // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( it == updater_list.begin() ? 
input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); } } // Compute errors { ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, input_entry_id, output_neuron_count, updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator learning_rate_data_it = learning_rate_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++learning_rate_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it) { if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? 
input_entry_id : 0, *command_stream, *net_data_it, *schema_data_it, *learning_rate_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id); if (bound_it != weight_vector_bounds.end()) { const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second; const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second; bound_it->second->enqueue_normalize_weights( *command_stream, bound, *net_data_it, additional_buffers, updater_entry_count); } } } if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // for(unsigned int input_entry_id entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(hipEventRecord(data_processed_event, *command_stream)); cuda_safe_call(hipEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(hipStreamSynchronize(*data_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } read_data(net_data, data_list, *command_stream); std::vector<double> error_list(updater_entry_count); cuda_safe_call(hipMemcpyAsync(&(*error_list.begin()), *error_buf, error_list.size() * sizeof(double), hipMemcpyDeviceToHost, *command_stream)); cuda_safe_call(hipStreamSynchronize(*command_stream)); for(unsigned int i = 0; i < updater_entry_count; ++i) res[i]->init(error_list[i], entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_learning_rate( const std::vector<network_data_smart_ptr>& learning_rate_list, hipStream_t stream_id) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; const network_data_smart_ptr& first_data = learning_rate_list[0]; for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id) { std::vector<const_cuda_linear_buffer_device_smart_ptr> buffer_list; unsigned int subindex = 0; for(std::vector<std::vector<float> >::iterator it = 
(*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex) { size_t single_size = it->size(); std::vector<float> pack(single_size * learning_rate_list.size()); std::vector<float>::iterator fill_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = learning_rate_list.begin(); sample_it != learning_rate_list.end(); sample_it++) { const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex); fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it); } buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device( &(*pack.begin()), pack.size() * sizeof(float), stream_id))); } res.push_back(buffer_list); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_data( const std::vector<network_data_smart_ptr>& data_list, hipStream_t stream_id) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; const network_data_smart_ptr& first_data = data_list[0]; for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id) { std::vector<cuda_linear_buffer_device_smart_ptr> buffer_list; unsigned int subindex = 0; for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex) { size_t single_size = it->size(); std::vector<float> pack(single_size * data_list.size()); std::vector<float>::iterator fill_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = data_list.begin(); sample_it != data_list.end(); sample_it++) { const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex); fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it); } buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device( &(*pack.begin()), pack.size() * sizeof(float), stream_id))); } res.push_back(buffer_list); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, std::vector<network_data_smart_ptr>& res, hipStream_t stream_id) const { const network_data_smart_ptr& first_data = res[0]; unsigned int layer_id = testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) { unsigned int subindex = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator src_it2 = src_it->begin(); src_it2 != src_it->end(); ++src_it2, ++subindex) { cuda_linear_buffer_device_smart_ptr src = *src_it2; std::vector<float> pack(src->get_size() / sizeof(float)); cuda_safe_call(hipMemcpyAsync(&(*pack.begin()), *src, pack.size() * sizeof(float), hipMemcpyDeviceToHost, stream_id)); cuda_safe_call(hipStreamSynchronize(stream_id)); std::vector<float>::const_iterator src_buf_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++) { std::vector<float>& dst_buf = (*sample_it)->at(layer_id)->at(subindex); std::copy(src_buf_it, src_buf_it + dst_buf.size(), dst_buf.begin()); src_buf_it += dst_buf.size(); } } } } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != 
testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_max_batch_size() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( hipStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); hipLaunchKernelGGL(( dropout_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } } }
27ab560bd384edaf3f2a7da038684621c46ae8e6.cu
/* * Copyright 2011-2014 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "network_updater_cuda.h" #include "neural_network_cuda_exception.h" #include "layer_testing_schema_factory.h" #include "cuda_linear_buffer_device.h" #include "cuda_linear_buffer_host.h" #include "util_cuda.h" #include "cuda_event.h" #include "layer_updater_schema_factory.h" #include "weight_vector_bound_cuda_factory.h" #include "supervised_data_reader_async_helper.h" #include "error_function_updater_cuda_factory.h" #include "../nn_types.h" #include <cuda_runtime.h> #include <boost/format.hpp> #include <stack> namespace nnforge { namespace cuda { __global__ void convert_compacted_to_raw_upd_kernel( const uchar4 * __restrict input, float4 * __restrict output, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { uchar4 inp = input[elem_id]; float4 val; val.x = inp.x * (1.0F / 255.0F); val.y = inp.y * (1.0F / 255.0F); val.z = inp.z * (1.0F / 255.0F); val.w = inp.w * (1.0F / 255.0F); output[elem_id] = val; } } __global__ void dropout_kernel( float * __restrict neurons, const float * __restrict random_buf, float dropout_rate, int offset, unsigned int mask, int elem_count) { int elem_id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x; if (elem_id < elem_count) { unsigned int random_elem_id = (elem_id + offset) & mask; if (random_buf[random_elem_id] < dropout_rate) neurons[elem_id] = 0.0F; } } unsigned int network_updater_cuda::max_entry_count_in_single_batch = 1024; network_updater_cuda::network_updater_cuda( network_schema_smart_ptr schema, const_error_function_smart_ptr ef, const std::map<unsigned int, float>& layer_to_dropout_rate_map, const std::map<unsigned int, weight_vector_bound>& layer_to_weight_vector_bound_map, cuda_running_configuration_const_smart_ptr cuda_config) : network_updater(schema, ef, layer_to_dropout_rate_map, layer_to_weight_vector_bound_map) , cuda_config(cuda_config) { const const_layer_list& layer_list = *schema; testing_layer_count = 0; start_layer_nonempty_weights_iterator = layer_list.begin(); for(const_layer_list::const_iterator it = layer_list.begin(); it != layer_list.end(); ++it) { start_layer_nonempty_weights_iterator = it; if (!(*it)->is_empty_data()) break; testing_layer_count++; } for(const_layer_list::const_iterator it = layer_list.begin(); it != start_layer_nonempty_weights_iterator; ++it) testing_schemas.push_back(single_layer_testing_schema_factory::get_const_instance().create_testing_schema_layer(*it, cuda_config)); for(const_layer_list::const_iterator it = start_layer_nonempty_weights_iterator; it != layer_list.end(); ++it) updater_schemas.push_back(single_layer_updater_schema_factory::get_const_instance().create_updater_schema_layer(*it, cuda_config)); for(std::map<unsigned int, weight_vector_bound>::const_iterator it = this->layer_to_weight_vector_bound_map.begin(); it != this->layer_to_weight_vector_bound_map.end(); ++it) { unsigned int layer_id = it->first; if (layer_id < 
testing_layer_count) throw neural_network_exception((boost::format("Weight vector bound is specified fo layer %1% while it is in testing part (consisting of %2% layers) of the updater") % layer_id % testing_layer_count).str()); weight_vector_bounds.insert(std::make_pair(layer_id, single_weight_vector_bound_factory::get_const_instance().create_weight_vector_bound(layer_list[layer_id], cuda_config))); } ef_updater = single_error_function_updater_cuda_factory::get_const_instance().get_error_function_updater_cuda(ef->get_uuid()); setup_network_cuda(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it) testing_schema_data.push_back((*it)->get_schema_buffers()); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it) updater_schema_data.push_back((*it)->get_schema_buffers()); } network_updater_cuda::~network_updater_cuda() { } void network_updater_cuda::setup_network_cuda() { command_stream = cuda_stream_smart_ptr(new cuda_stream()); data_stream = cuda_stream_smart_ptr(new cuda_stream()); } std::vector<testing_result_smart_ptr> network_updater_cuda::actual_update( supervised_data_reader& reader, const std::vector<network_data_smart_ptr>& learning_rate_vector_list, std::vector<network_data_smart_ptr>& data_list) { std::vector<testing_result_smart_ptr> res; reader.reset(); layer_configuration_specific input_configuration = reader.get_input_configuration(); layer_configuration_specific output_configuration = reader.get_output_configuration(); unsigned int input_neuron_count = input_configuration.get_neuron_count(); unsigned int output_neuron_count = output_configuration.get_neuron_count(); unsigned int input_neuron_count_per_feature_map = input_configuration.get_neuron_count_per_feature_map(); neuron_data_type::input_type type_code = reader.get_input_type(); size_t input_neuron_elem_size = reader.get_input_neuron_elem_size(); unsigned int updater_entry_count = static_cast<unsigned int>(data_list.size()); if (updater_entry_count == 0) return res; for(unsigned int i = 0; i < learning_rate_vector_list.size(); ++i) res.push_back(testing_result_smart_ptr(new testing_result(ef))); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > net_data = enqueue_get_data(data_list, *command_stream); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > learning_rate_data = enqueue_get_learning_rate(learning_rate_vector_list, *command_stream); buffer_cuda_size_configuration buffers_config; update_buffers_configuration(buffers_config, updater_entry_count); buffers_config.add_per_entry_linear_addressing_through_texture(layer_config_list[testing_layer_count].get_neuron_count()); // This is for the first updater to safely read input data through the texture buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * input_neuron_elem_size); // input buffers_config.add_per_entry_buffer(input_neuron_count * sizeof(float)); // converted input buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_per_entry_buffer(output_neuron_count * sizeof(float)); // output buffers_config.add_constant_buffer(output_neuron_count * sizeof(float) * updater_entry_count); // initial error buffers_config.add_constant_buffer(sizeof(double) * updater_entry_count); // error buffer if (!random_uniform_list.empty()) 
buffers_config.add_constant_buffer(random_uniform_list.size() * sizeof(float)); // random_uniform_list for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::const_iterator it = net_data.begin(); it != net_data.end(); ++it) for(std::vector<cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = learning_rate_data.begin(); it != learning_rate_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffers_config.add_constant_buffer((*it2)->get_size()); unsigned int max_entry_count = std::min<unsigned int>(std::min<unsigned int>(cuda_config->get_max_entry_count(buffers_config), reader.get_entry_count()), max_entry_count_in_single_batch); cuda_linear_buffer_device_smart_ptr input_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * input_neuron_elem_size)), }; cuda_linear_buffer_device_smart_ptr output_buf[2] = { cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(output_neuron_count * max_entry_count * sizeof(float))), }; cuda_linear_buffer_device_smart_ptr input_converted_buf(new cuda_linear_buffer_device(input_neuron_count * max_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr initial_error_buf(new cuda_linear_buffer_device(output_neuron_count * updater_entry_count * sizeof(float))); cuda_linear_buffer_device_smart_ptr error_buf(new cuda_linear_buffer_device(updater_entry_count * sizeof(double))); cuda_linear_buffer_device_smart_ptr random_uniform_buf; if (!random_uniform_list.empty()) { random_uniform_buf = cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device(random_uniform_list.size() * sizeof(float))); cuda_safe_call(cudaMemcpyAsync(*random_uniform_buf, &(*random_uniform_list.begin()), random_uniform_list.size() * sizeof(float), cudaMemcpyHostToDevice, *command_stream)); } cuda_linear_buffer_device_smart_ptr output_buffer = input_converted_buf; std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > > testing_input_and_additional_buffers_pack; for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it) { std::vector<cuda_linear_buffer_device_smart_ptr> additional_buffers = (*it)->allocate_additional_buffers(max_entry_count); testing_input_and_additional_buffers_pack.push_back(std::make_pair(output_buffer, additional_buffers)); output_buffer = (*it)->get_output_buffer(output_buffer, additional_buffers); } std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> > updater_input_and_all_buffers_pack; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it) { layer_updater_cuda::buffer_set all_buffers = (*it)->allocate_all_buffers(updater_entry_count); updater_input_and_all_buffers_pack.push_back(std::make_pair(output_buffer, all_buffers)); output_buffer = all_buffers.output_neurons_buffer; } std::vector<cuda_linear_buffer_device_smart_ptr> 
output_errors_buffers; cuda_linear_buffer_device_smart_ptr output_errors = initial_error_buf; for(std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator it = updater_input_and_all_buffers_pack.rbegin(); it != updater_input_and_all_buffers_pack.rend(); ++it) { output_errors_buffers.push_back(output_errors); layer_updater_cuda::buffer_set& all_buffers = it->second; if (all_buffers.input_errors_buffer != 0) output_errors = all_buffers.input_errors_buffer; } std::map<unsigned int, std::vector<cuda_linear_buffer_device_smart_ptr> > weight_vector_bound_buffers; for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) weight_vector_bound_buffers.insert(std::make_pair(it->first, it->second->allocate_additional_buffers(max_entry_count))); cuda_linear_buffer_host_smart_ptr input_host_buf(new cuda_linear_buffer_host(input_neuron_count * max_entry_count * input_neuron_elem_size)); unsigned char * input = *input_host_buf; cuda_linear_buffer_host_smart_ptr output_host_buf(new cuda_linear_buffer_host(output_neuron_count * max_entry_count * sizeof(float))); float * output = *output_host_buf; // zero mse cuda_util::set_with_value( *cuda_config, (double *)(*error_buf), 0.0, updater_entry_count, *command_stream); unsigned int current_data_slot = 0; unsigned int current_command_slot = 1; unsigned int entries_available_for_copy_in_count = reader.get_entry_count(); unsigned int entries_available_for_processing_count = 0; cuda_event data_processed_event; cuda_event input_copied_event; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } random_generator gen = rnd::get_random_generator(); nnforge_uniform_int_distribution<unsigned int> dist(0, static_cast<unsigned int>(random_uniform_list.size() - 1)); unsigned int mask = static_cast<unsigned int>(random_uniform_list.size() - 1); unsigned int entries_processed_count = 0; while((entries_available_for_copy_in_count > 0) || (entries_available_for_processing_count > 0)) { supervised_data_reader_async_helper async_reader; if (entries_available_for_copy_in_count > 0) { unsigned int entries_to_read_count = std::min<unsigned int>(max_entry_count, entries_available_for_copy_in_count); async_reader.fun = supervised_data_reader_functor( entries_to_read_count, &reader, input, output, *(input_buf[current_data_slot]), *(output_buf[current_data_slot]), *data_stream); async_reader.start(); } if (entries_available_for_processing_count > 0) { // Convert input if (type_code == neuron_data_type::type_byte) { int elem_count = (input_neuron_count * entries_available_for_processing_count + 3) / 4; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); convert_compacted_to_raw_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, *command_stream>>>( *input_buf[current_command_slot], *input_converted_buf, elem_count); } else if (type_code == neuron_data_type::type_float) { cuda_safe_call(cudaMemcpyAsync( *input_converted_buf, *input_buf[current_command_slot], input_neuron_count * entries_available_for_processing_count * sizeof(float), cudaMemcpyDeviceToDevice, *command_stream)); } else throw neural_network_exception((boost::format("actual_update cannot handle input neurons of type %1%") % type_code).str()); // Run ann { 
std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, std::vector<cuda_linear_buffer_device_smart_ptr> > >::iterator input_and_additional_buffers_pack_it = testing_input_and_additional_buffers_pack.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = testing_schema_data.begin(); unsigned int layer_id = 0; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin(); for(std::vector<layer_tester_cuda_smart_ptr>::iterator it = tester_list.begin(); it != tester_list.end(); ++it, ++input_and_additional_buffers_pack_it, ++schema_data_it, ++layer_id, ++layer_config_it) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, input_and_additional_buffers_pack_it->first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_it->get_neuron_count(), offset); } (*it)->enqueue_test( *command_stream, *schema_data_it, std::vector<const_cuda_linear_buffer_device_smart_ptr>(), input_and_additional_buffers_pack_it->first, input_and_additional_buffers_pack_it->second, entries_available_for_processing_count); } } // Apply dropout to the input of the first updater layer { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(testing_layer_count); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); enqueue_dropout( *command_stream, random_uniform_buf, updater_input_and_all_buffers_pack[0].first, dropout_it->second, mask, entries_available_for_processing_count * layer_config_list[testing_layer_count].get_neuron_count(), offset); } } for(unsigned int input_entry_id = 0; input_entry_id < entries_available_for_processing_count; ++input_entry_id) { std::stack<unsigned int> offset_list; // Forward updater { std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.begin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator net_data_it = net_data.begin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::iterator schema_data_it = updater_schema_data.begin(); unsigned int layer_id = testing_layer_count; layer_configuration_specific_list::const_iterator layer_config_it = layer_config_list.begin() + testing_layer_count; for(std::vector<layer_updater_cuda_smart_ptr>::iterator it = updater_list.begin(); it != updater_list.end(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++net_data_it, ++layer_id, ++layer_config_it) { if (it != updater_list.begin()) { std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = dist(gen); offset_list.push(offset); enqueue_dropout( *command_stream, random_uniform_buf, input_and_all_buffers_pack_it->first, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_test( it == updater_list.begin() ? 
input_entry_id : 0, *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); } } // Compute errors { ef_updater->enqueue_update_error_and_gradient( *command_stream, initial_error_buf, error_buf, output_buf[current_command_slot], output_buffer, input_entry_id, output_neuron_count, updater_entry_count); } // Backward updater { std::vector<cuda_linear_buffer_device_smart_ptr>::iterator output_errors_it = output_errors_buffers.begin(); std::vector<std::pair<cuda_linear_buffer_device_smart_ptr, layer_updater_cuda::buffer_set> >::reverse_iterator input_and_all_buffers_pack_it = updater_input_and_all_buffers_pack.rbegin(); std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::reverse_iterator net_data_it = net_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator learning_rate_data_it = learning_rate_data.rbegin(); std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::reverse_iterator schema_data_it = updater_schema_data.rbegin(); unsigned int reverse_layer_id = static_cast<unsigned int>(updater_list.size() + testing_layer_count) - 1; layer_configuration_specific_list::const_reverse_iterator layer_config_it = layer_config_list.rbegin() + 1; for(std::vector<layer_updater_cuda_smart_ptr>::reverse_iterator it = updater_list.rbegin(); it != updater_list.rend(); ++it, ++input_and_all_buffers_pack_it, ++schema_data_it, ++learning_rate_data_it, ++output_errors_it, ++net_data_it, --reverse_layer_id, ++layer_config_it) { if (it != (updater_list.rend() - 1)) { (*it)->enqueue_backprop( *command_stream, *schema_data_it, *net_data_it, input_and_all_buffers_pack_it->second.output_neurons_buffer, input_and_all_buffers_pack_it->first, *output_errors_it, input_and_all_buffers_pack_it->second.input_errors_buffer, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); std::map<unsigned int, float>::const_iterator dropout_it = layer_to_dropout_rate_map.find(reverse_layer_id); if (dropout_it != layer_to_dropout_rate_map.end()) { unsigned int offset = offset_list.top(); offset_list.pop(); enqueue_dropout( *command_stream, random_uniform_buf, (input_and_all_buffers_pack_it->second.input_errors_buffer == 0) ? *output_errors_it : input_and_all_buffers_pack_it->second.input_errors_buffer, dropout_it->second, mask, updater_entry_count * layer_config_it->get_neuron_count(), offset); } } (*it)->enqueue_update_weights( (it == (updater_list.rend() - 1)) ? 
input_entry_id : 0, *command_stream, *net_data_it, *schema_data_it, *learning_rate_data_it, *output_errors_it, input_and_all_buffers_pack_it->first, input_and_all_buffers_pack_it->second.additional_buffers, input_and_all_buffers_pack_it->second.dynamic_memobjects, updater_entry_count); weight_vector_bound_map::iterator bound_it = weight_vector_bounds.find(reverse_layer_id); if (bound_it != weight_vector_bounds.end()) { const weight_vector_bound& bound = layer_to_weight_vector_bound_map.find(reverse_layer_id)->second; const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers = weight_vector_bound_buffers.find(reverse_layer_id)->second; bound_it->second->enqueue_normalize_weights( *command_stream, bound, *net_data_it, additional_buffers, updater_entry_count); } } } if (((input_entry_id % 16) == 1) && cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // for(unsigned int input_entry_id entries_processed_count += entries_available_for_processing_count; if (cuda_config->is_flush_required()) { cuda_safe_call(cudaEventRecord(data_processed_event, *command_stream)); cuda_safe_call(cudaEventQuery(data_processed_event)); } } // if (entries_available_for_processing_count > 0) unsigned int entries_read_count = 0; if (entries_available_for_copy_in_count > 0) entries_read_count = async_reader.wait(); cuda_safe_call(cudaStreamSynchronize(*data_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); entries_available_for_processing_count = entries_read_count; entries_available_for_copy_in_count -= entries_read_count; current_data_slot = 1 - current_data_slot; current_command_slot = 1 - current_command_slot; } read_data(net_data, data_list, *command_stream); std::vector<double> error_list(updater_entry_count); cuda_safe_call(cudaMemcpyAsync(&(*error_list.begin()), *error_buf, error_list.size() * sizeof(double), cudaMemcpyDeviceToHost, *command_stream)); cuda_safe_call(cudaStreamSynchronize(*command_stream)); for(unsigned int i = 0; i < updater_entry_count; ++i) res[i]->init(error_list[i], entries_processed_count); return res; } void network_updater_cuda::layer_config_list_modified() { layer_configuration_specific_list::const_iterator it_conf = layer_config_list.begin(); tester_list.clear(); for(const_layer_testing_schema_list::const_iterator it = testing_schemas.begin(); it != testing_schemas.end(); ++it, ++it_conf) { tester_list.push_back( (*it)->create_tester( *it_conf, *(it_conf + 1))); } updater_list.clear(); for(const_layer_updater_schema_list::const_iterator it = updater_schemas.begin(); it != updater_schemas.end(); ++it, ++it_conf) { updater_list.push_back( (*it)->create_updater( *it_conf, *(it_conf + 1), (it_conf > layer_config_list.begin() + testing_layer_count), (it_conf > layer_config_list.begin() + testing_layer_count))); } } std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_learning_rate( const std::vector<network_data_smart_ptr>& learning_rate_list, cudaStream_t stream_id) const { std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> > res; const network_data_smart_ptr& first_data = learning_rate_list[0]; for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id) { std::vector<const_cuda_linear_buffer_device_smart_ptr> buffer_list; unsigned int subindex = 0; for(std::vector<std::vector<float> >::iterator it = 
(*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex) { size_t single_size = it->size(); std::vector<float> pack(single_size * learning_rate_list.size()); std::vector<float>::iterator fill_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = learning_rate_list.begin(); sample_it != learning_rate_list.end(); sample_it++) { const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex); fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it); } buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device( &(*pack.begin()), pack.size() * sizeof(float), stream_id))); } res.push_back(buffer_list); } return res; } std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > network_updater_cuda::enqueue_get_data( const std::vector<network_data_smart_ptr>& data_list, cudaStream_t stream_id) const { std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> > res; const network_data_smart_ptr& first_data = data_list[0]; for(unsigned int layer_id = testing_layer_count; layer_id < updater_schemas.size() + testing_layer_count; ++layer_id) { std::vector<cuda_linear_buffer_device_smart_ptr> buffer_list; unsigned int subindex = 0; for(std::vector<std::vector<float> >::iterator it = (*first_data)[layer_id]->begin(); it != (*first_data)[layer_id]->end(); ++it, ++subindex) { size_t single_size = it->size(); std::vector<float> pack(single_size * data_list.size()); std::vector<float>::iterator fill_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = data_list.begin(); sample_it != data_list.end(); sample_it++) { const std::vector<float>& inp_buf = (*sample_it)->at(layer_id)->at(subindex); fill_it = std::copy(inp_buf.begin(), inp_buf.end(), fill_it); } buffer_list.push_back(cuda_linear_buffer_device_smart_ptr(new cuda_linear_buffer_device( &(*pack.begin()), pack.size() * sizeof(float), stream_id))); } res.push_back(buffer_list); } return res; } void network_updater_cuda::read_data( std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >& data_list, std::vector<network_data_smart_ptr>& res, cudaStream_t stream_id) const { const network_data_smart_ptr& first_data = res[0]; unsigned int layer_id = testing_layer_count; for(std::vector<std::vector<cuda_linear_buffer_device_smart_ptr> >::iterator src_it = data_list.begin(); src_it != data_list.end(); ++src_it, ++layer_id) { unsigned int subindex = 0; for(std::vector<cuda_linear_buffer_device_smart_ptr>::iterator src_it2 = src_it->begin(); src_it2 != src_it->end(); ++src_it2, ++subindex) { cuda_linear_buffer_device_smart_ptr src = *src_it2; std::vector<float> pack(src->get_size() / sizeof(float)); cuda_safe_call(cudaMemcpyAsync(&(*pack.begin()), *src, pack.size() * sizeof(float), cudaMemcpyDeviceToHost, stream_id)); cuda_safe_call(cudaStreamSynchronize(stream_id)); std::vector<float>::const_iterator src_buf_it = pack.begin(); for(std::vector<network_data_smart_ptr>::const_iterator sample_it = res.begin(); sample_it != res.end(); sample_it++) { std::vector<float>& dst_buf = (*sample_it)->at(layer_id)->at(subindex); std::copy(src_buf_it, src_buf_it + dst_buf.size(), dst_buf.begin()); src_buf_it += dst_buf.size(); } } } } void network_updater_cuda::update_buffers_configuration( buffer_cuda_size_configuration& buffer_configuration, unsigned int updater_entry_count) const { for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = testing_schema_data.begin(); it != 
testing_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_tester_cuda_smart_ptr>::const_iterator it = tester_list.begin(); it != tester_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::vector<std::vector<const_cuda_linear_buffer_device_smart_ptr> >::const_iterator it = updater_schema_data.begin(); it != updater_schema_data.end(); ++it) for(std::vector<const_cuda_linear_buffer_device_smart_ptr>::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) buffer_configuration.add_constant_buffer((*it2)->get_size()); for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration, updater_entry_count); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration, updater_entry_count); } unsigned int network_updater_cuda::get_max_batch_size() const { buffer_cuda_size_configuration buffer_configuration; for(std::vector<layer_updater_cuda_smart_ptr>::const_iterator it = updater_list.begin(); it != updater_list.end(); ++it) (*it)->update_buffer_configuration(buffer_configuration); for(std::map<unsigned int, weight_vector_bound_cuda_smart_ptr>::const_iterator it = weight_vector_bounds.begin(); it != weight_vector_bounds.end(); ++it) it->second->update_buffer_configuration(buffer_configuration); return cuda_config->get_max_entry_count(buffer_configuration, 0.5F); } void network_updater_cuda::enqueue_dropout( cudaStream_t stream_id, const_cuda_linear_buffer_device_smart_ptr random_buffer, cuda_linear_buffer_device_smart_ptr target_buffer, float dropout_rate, unsigned int mask, unsigned int elem_count, unsigned int offset_in_random_list) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); dropout_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *target_buffer, *random_buffer, dropout_rate, offset_in_random_list, mask, elem_count); } } }
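A note on the packing layout used by enqueue_get_learning_rate and enqueue_get_data above (this only restates what the copy loops do, with made-up sizes for illustration): for each parameter blob of a layer, the device buffer concatenates that blob across all samples. With, say, 3 network_data samples and a 100-float weight blob, pack.size() = 100 * 3 = 300 and sample s occupies elements [s*100, (s+1)*100); read_data walks the same layout in reverse when copying results back into each sample on the host.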
b075a4a7c2feb448c62a30f872a5dd956756c084.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO \ 1 // halo width along one direction when advancing to the next iteration void run(int argc, char **argv); int rows, cols; int *data; int **wall; int *result; int pyramid_height; void init(int argc, char **argv) { if (argc == 4) { cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height = atoi(argv[3]); } else { printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(1); } data = new int[rows * cols]; wall = new int *[rows]; for (int n = 0; n < rows; n++) wall[n] = data + cols * n; result = new int[cols]; srand(7); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } if (getenv("OUTPUT")) { FILE *file = fopen("output.txt", "w"); fprintf(file, "wall:\n"); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { fprintf(file, "%d ", wall[i][j]); } fprintf(file, "\n"); } fclose(file); } } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE - iteration * HALO * 2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols * bx - border; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int xidx = blkX + tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1) : BLOCK_SIZE - 1; int W = tx - 1; int E = tx + 1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if (IN_RANGE(xidx, 0, cols - 1)) { prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) { computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols * (startStep + i) + xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range prev[tx] = result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 // 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { gpuResults[xidx] = result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows - 1; t += pyramid_height) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(pyramid_height, rows - t - 1), gpuWall, gpuResult[src], gpuResult[dst], cols, rows, t, borderCols); } return dst; } int main(int argc, char **argv) { int num_devices; hipGetDeviceCount(&num_devices); if (num_devices > 1) hipSetDevice(DEVICE); run(argc, argv); return EXIT_SUCCESS; } void run(int argc, char **argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2; int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: " "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n", pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows * cols; struct timespec start, end; clock_gettime(CLOCK_REALTIME, &start); hipMalloc((void **)&gpuResult[0], sizeof(int) * cols); hipMalloc((void **)&gpuResult[1], sizeof(int) * cols); hipMemcpy(gpuResult[0], data, sizeof(int) * cols, hipMemcpyHostToDevice); hipMalloc((void **)&gpuWall, sizeof(int) * (size - cols)); hipMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols), hipMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); hipMemcpy(result, gpuResult[final_ret], sizeof(int) * cols, hipMemcpyDeviceToHost); clock_gettime(CLOCK_REALTIME, &end); double elapsed = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec)/1E9; printf("%.6f seconds\n", elapsed); if (getenv("OUTPUT")) { FILE *file = fopen("output.txt", "a"); fprintf(file, "data:\n"); for (int i = 0; i < cols; i++) fprintf(file, "%d ", data[i]); fprintf(file, "\n"); fprintf(file, "result:\n"); for (int i = 0; i < cols; i++) fprintf(file, "%d ", result[i]); fprintf(file, "\n"); fclose(file); } hipFree(gpuWall); hipFree(gpuResult[0]); hipFree(gpuResult[1]); delete[] data; delete[] wall; delete[] result; }
b075a4a7c2feb448c62a30f872a5dd956756c084.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO \ 1 // halo width along one direction when advancing to the next iteration void run(int argc, char **argv); int rows, cols; int *data; int **wall; int *result; int pyramid_height; void init(int argc, char **argv) { if (argc == 4) { cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height = atoi(argv[3]); } else { printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(1); } data = new int[rows * cols]; wall = new int *[rows]; for (int n = 0; n < rows; n++) wall[n] = data + cols * n; result = new int[cols]; srand(7); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } if (getenv("OUTPUT")) { FILE *file = fopen("output.txt", "w"); fprintf(file, "wall:\n"); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { fprintf(file, "%d ", wall[i][j]); } fprintf(file, "\n"); } fclose(file); } } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x) >= (min) && (x) <= (max)) #define CLAMP_RANGE(x, min, max) x = (x < (min)) ? min : ((x > (max)) ? max : x) #define MIN(a, b) ((a) <= (b) ? (a) : (b)) __global__ void dynproc_kernel(int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE - iteration * HALO * 2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols * bx - border; int blkXmax = blkX + BLOCK_SIZE - 1; // calculate the global thread coordination int xidx = blkX + tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols - 1) ? BLOCK_SIZE - 1 - (blkXmax - cols + 1) : BLOCK_SIZE - 1; int W = tx - 1; int E = tx + 1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if (IN_RANGE(xidx, 0, cols - 1)) { prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 bool computed; for (int i = 0; i < iteration; i++) { computed = false; if (IN_RANGE(tx, i + 1, BLOCK_SIZE - i - 2) && isValid) { computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols * (startStep + i) + xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if (i == iteration - 1) break; if (computed) // Assign the computation range prev[tx] = result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 // 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed) { gpuResults[xidx] = result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; for (int t = 0; t < rows - 1; t += pyramid_height) { int temp = src; src = dst; dst = temp; dynproc_kernel<<<dimGrid, dimBlock>>>( MIN(pyramid_height, rows - t - 1), gpuWall, gpuResult[src], gpuResult[dst], cols, rows, t, borderCols); } return dst; } int main(int argc, char **argv) { int num_devices; cudaGetDeviceCount(&num_devices); if (num_devices > 1) cudaSetDevice(DEVICE); run(argc, argv); return EXIT_SUCCESS; } void run(int argc, char **argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE - (pyramid_height)*HALO * 2; int blockCols = cols / smallBlockCol + ((cols % smallBlockCol == 0) ? 0 : 1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: " "%d\nblockGrid:[%d]\ntargetBlock:[%d]\n", pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows * cols; struct timespec start, end; clock_gettime(CLOCK_REALTIME, &start); cudaMalloc((void **)&gpuResult[0], sizeof(int) * cols); cudaMalloc((void **)&gpuResult[1], sizeof(int) * cols); cudaMemcpy(gpuResult[0], data, sizeof(int) * cols, cudaMemcpyHostToDevice); cudaMalloc((void **)&gpuWall, sizeof(int) * (size - cols)); cudaMemcpy(gpuWall, data + cols, sizeof(int) * (size - cols), cudaMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, pyramid_height, blockCols, borderCols); cudaMemcpy(result, gpuResult[final_ret], sizeof(int) * cols, cudaMemcpyDeviceToHost); clock_gettime(CLOCK_REALTIME, &end); double elapsed = (end.tv_sec - start.tv_sec) + (end.tv_nsec - start.tv_nsec)/1E9; printf("%.6f seconds\n", elapsed); if (getenv("OUTPUT")) { FILE *file = fopen("output.txt", "a"); fprintf(file, "data:\n"); for (int i = 0; i < cols; i++) fprintf(file, "%d ", data[i]); fprintf(file, "\n"); fprintf(file, "result:\n"); for (int i = 0; i < cols; i++) fprintf(file, "%d ", result[i]); fprintf(file, "\n"); fclose(file); } cudaFree(gpuWall); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); delete[] data; delete[] wall; delete[] result; }
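To make the pyramid parameters in run() concrete (the pyramid_height value below is just an illustrative command-line argument, not something fixed by the code): with BLOCK_SIZE = 256, HALO = 1 and pyramid_height = 4, borderCols = 4 and smallBlockCol = 256 - 4*1*2 = 248, so each kernel launch advances the wavefront by up to 4 rows while each block produces 248 valid result columns, and blockCols = ceil(cols / 248) blocks cover a row. calc_path then ping-pongs gpuResult[0] and gpuResult[1] as source and destination between launches until all rows are processed.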
af1b4448f09ff5f258eba653363801f8e5882a22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) OpenMMLab. All rights reserved // Modified from // https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include "cc_attention_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f, Tensor weight) { AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor"); auto n = t.size(0); auto c = t.size(1); auto h = t.size(2); auto w = t.size(3); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = h + w - 1; dim3 blocks(d1, d2, d3 * n); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] { hipLaunchKernelGGL(( ca_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(hipGetLastError()); } void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t, const Tensor f, Tensor dt, Tensor df) { AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor"); auto n = t.size(0); auto c = t.size(1); auto h = t.size(2); auto w = t.size(3); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = c * n; dim3 blocks(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] { hipLaunchKernelGGL(( ca_backward_kernel_t<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dw.contiguous().data_ptr<scalar_t>(), t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), dt.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] { hipLaunchKernelGGL(( ca_backward_kernel_f<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dw.contiguous().data_ptr<scalar_t>(), t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), df.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(hipGetLastError()); } void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g, Tensor out) { AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor"); auto n = g.size(0); auto c = g.size(1); auto h = g.size(2); auto w = g.size(3); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = c * n; dim3 blocks(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] { hipLaunchKernelGGL(( ca_map_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), out.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(hipGetLastError()); } void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight, const Tensor g, Tensor dw, Tensor dg) 
{ AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor"); auto n = dout.size(0); auto c = dout.size(1); auto h = dout.size(2); auto w = dout.size(3); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = h + w - 1; dim3 blocks(d1, d2, d3 * n); AT_DISPATCH_FLOATING_TYPES( weight.scalar_type(), "ca_map_backward_kernel_w", [&] { hipLaunchKernelGGL(( ca_map_backward_kernel_w<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dout.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), dw.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); d3 = c * n; blocks = dim3(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] { hipLaunchKernelGGL(( ca_map_backward_kernel_g<scalar_t>), dim3(blocks), dim3(threads), 0, stream, dout.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), dg.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(hipGetLastError()); }
af1b4448f09ff5f258eba653363801f8e5882a22.cu
// Copyright (c) OpenMMLab. All rights reserved // Modified from // https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/modules/csrc/criss_cross_attention/ca_cuda.cu #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include "cc_attention_cuda_kernel.cuh" #include "pytorch_cuda_helper.hpp" void CAForwardCUDAKernelLauncher(const Tensor t, const Tensor f, Tensor weight) { AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor"); auto n = t.size(0); auto c = t.size(1); auto h = t.size(2); auto w = t.size(3); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = h + w - 1; dim3 blocks(d1, d2, d3 * n); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_forward", [&] { ca_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(cudaGetLastError()); } void CABackwardCUDAKernelLauncher(const Tensor dw, const Tensor t, const Tensor f, Tensor dt, Tensor df) { AT_ASSERTM(dw.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(t.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(f.device().is_cuda(), "input must be a CUDA tensor"); auto n = t.size(0); auto c = t.size(1); auto h = t.size(2); auto w = t.size(3); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = c * n; dim3 blocks(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(t.scalar_type(), "ca_backward_kernel_t", [&] { ca_backward_kernel_t<scalar_t><<<blocks, threads, 0, stream>>>( dw.contiguous().data_ptr<scalar_t>(), t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), dt.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); AT_DISPATCH_FLOATING_TYPES(f.scalar_type(), "ca_backward_kernel_f", [&] { ca_backward_kernel_f<scalar_t><<<blocks, threads, 0, stream>>>( dw.contiguous().data_ptr<scalar_t>(), t.contiguous().data_ptr<scalar_t>(), f.contiguous().data_ptr<scalar_t>(), df.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(cudaGetLastError()); } void CAMapForwardCUDAKernelLauncher(const Tensor weight, const Tensor g, Tensor out) { AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor"); auto n = g.size(0); auto c = g.size(1); auto h = g.size(2); auto w = g.size(3); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = c * n; dim3 blocks(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_forward", [&] { ca_map_forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), out.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(cudaGetLastError()); } void CAMapBackwardCUDAKernelLauncher(const Tensor dout, const Tensor weight, const Tensor g, Tensor dw, Tensor dg) { AT_ASSERTM(dout.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(weight.device().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(g.device().is_cuda(), "input must be a CUDA tensor"); auto n = dout.size(0); auto c = dout.size(1); 
auto h = dout.size(2); auto w = dout.size(3); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // Run kernel dim3 threads(32, 32); int d1 = (w + threads.x - 1) / threads.x; int d2 = (h + threads.y - 1) / threads.y; int d3 = h + w - 1; dim3 blocks(d1, d2, d3 * n); AT_DISPATCH_FLOATING_TYPES( weight.scalar_type(), "ca_map_backward_kernel_w", [&] { ca_map_backward_kernel_w<scalar_t><<<blocks, threads, 0, stream>>>( dout.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), dw.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); d3 = c * n; blocks = dim3(d1, d2, d3); AT_DISPATCH_FLOATING_TYPES(g.scalar_type(), "ca_map_backward_kernel_g", [&] { ca_map_backward_kernel_g<scalar_t><<<blocks, threads, 0, stream>>>( dout.contiguous().data_ptr<scalar_t>(), weight.contiguous().data_ptr<scalar_t>(), g.contiguous().data_ptr<scalar_t>(), dg.contiguous().data_ptr<scalar_t>(), n, c, h, w); }); THCudaCheck(cudaGetLastError()); }
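The launch-configuration arithmetic above, worked through on a hypothetical input (the sizes are assumptions chosen only for illustration): for a batch of n = 2 feature maps of size 64x64, threads = (32, 32) gives d1 = d2 = 2; the forward kernel and ca_map_backward_kernel_w use d3 = h + w - 1 = 127, one z-slice per position on the criss-cross path, so blocks = (2, 2, 127 * 2), while ca_backward_kernel_t/f, ca_map_forward_kernel and ca_map_backward_kernel_g use d3 = c * n, one z-slice per channel per batch element.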
01c6abf0debef74548dc8dd1aa4d11706bd6d02c.hip
// !!! This is a file automatically generated by hipify!!! #include "SRPS.h" SRPS::SRPS(DataHandler& dh) { this->dh = &dh; } SRPS::~SRPS() {} template <typename T> void set_sparse_matrix_for_gradient(SparseCOO<T>& D, thrust::host_vector<int>& ic, thrust::host_vector<int>& ir, float k1, float k2) { memcpy(D.row, ic.data(), sizeof(int)*ic.size()); memcpy(D.row + ic.size(), ic.data(), sizeof(int)*ic.size()); memcpy(D.col, ir.data(), sizeof(int)*ir.size()); memcpy(D.col + ir.size(), ic.data(), sizeof(int)*ic.size()); for (size_t i = 0; i < ic.size(); i++) { D.val[i] = k1; } for (size_t i = ic.size(); i < 2 * ic.size(); i++) { D.val[i] = k2; } } std::pair<SparseCOO<float>, SparseCOO<float>> make_gradient(float* mask, int h, int w, int* index_in_masked_matrix, int mask_size) { thrust::host_vector<int> ic_top, ir_top; thrust::host_vector<int> ic_left, ir_left; thrust::host_vector<int> ic_right, ir_right; thrust::host_vector<int> ic_bottom, ir_bottom; for (int j = 0; j < w; j++) { for (int i = 0; i < h; i++) { if (i + 1 < h && mask[i + j * h] != 0 && mask[i + 1 + j * h] != 0) { ic_bottom.push_back(index_in_masked_matrix[i + j * h]); ir_bottom.push_back(index_in_masked_matrix[i + 1 + j * h]); } else if (i - 1 >= 0 && mask[i + j * h] != 0 && mask[i - 1 + j * h] != 0) { ic_top.push_back(index_in_masked_matrix[i + j * h]); ir_top.push_back(index_in_masked_matrix[i - 1 + j * h]); } if (j + 1 < w && mask[i + j * h] != 0 && mask[i + (j + 1) * h] != 0) { ic_right.push_back(index_in_masked_matrix[i + j * h]); ir_right.push_back(index_in_masked_matrix[i + (j + 1) * h]); } else if (j - 1 >= 0 && mask[i + j * h] != 0 && mask[i + (j - 1) * h] != 0) { ic_left.push_back(index_in_masked_matrix[i + j * h]); ir_left.push_back(index_in_masked_matrix[i + (j - 1) * h]); } } } SparseCOO<float> Dxp(mask_size, mask_size, (int)ic_right.size() * 2); set_sparse_matrix_for_gradient<float>(Dxp, ic_right, ir_right, 1, -1); SparseCOO<float> Dxn(mask_size, mask_size, (int)ic_left.size() * 2); set_sparse_matrix_for_gradient<float>(Dxn, ic_left, ir_left, -1, 1); SparseCOO<float> Dyp(mask_size, mask_size, (int)ic_bottom.size() * 2); set_sparse_matrix_for_gradient<float>(Dyp, ic_bottom, ir_bottom, 1, -1); SparseCOO<float> Dyn(mask_size, mask_size, (int)ic_top.size() * 2); set_sparse_matrix_for_gradient<float>(Dyn, ic_top, ir_top, -1, 1); SparseCOO<float> Dx = Dxp + Dxn; SparseCOO<float> Dy = Dyp + Dyn; Dxp.freeMemory(); Dyp.freeMemory(); Dxn.freeMemory(); Dyn.freeMemory(); return std::pair<SparseCOO<float>, SparseCOO<float>>(Dx, Dy); } template<class Iter, class T> Iter binary_find(Iter begin, Iter end, T val) { // Finds the lower bound in at most log(last - first) + 1 comparisons Iter i = std::lower_bound(begin, end, val); if (i != end && !(val < *i)) return i; // found else return end; // not found } void SRPS::execute() { float TOLERANCE = 5e-3f; int MAX_ITERATIONS = 10; hipSetDevice(Preferences::deviceId); // Initialize CUBLAS/CUSPARSE hipsparseHandle_t cusp_handle = 0; hipblasHandle_t cublas_handle = 0; if (hipsparseCreate(&cusp_handle) != HIPSPARSE_STATUS_SUCCESS) { throw std::runtime_error("CUSPARSE Library initialization failed"); } if (hipblasCreate(&cublas_handle) != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("CUBLAS Library initialization failed"); } // Move downsampling Matrix to device in CSR format int* d_D_row_ptr, *d_D_col_ind; float* d_D_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &dh->D, &d_D_row_ptr, &d_D_col_ind, &d_D_val); // Create mask for the downsampled image (GPU) std::cout << "Small mask 
calculation" << std::endl; float* d_mask = NULL; hipMalloc(&d_mask, sizeof(float) * dh->I_h * dh->I_w); hipMemcpy(d_mask, dh->mask, sizeof(float) * dh->I_h * dh->I_w, hipMemcpyHostToDevice); float* d_masks = cuda_based_sparsemat_densevec_mul(cusp_handle, d_D_row_ptr, d_D_col_ind, d_D_val, dh->D.n_row, dh->D.n_col, dh->D.n_nz, d_mask); thrust::replace_if(THRUST_CAST(d_masks), THRUST_CAST(d_masks) + dh->D.n_row, is_less_than_one(), 0.f); // Copy it back to host for imasks calculation float* masks = new float[dh->D.n_row]; hipMemcpy(masks, d_masks, sizeof(float)*dh->D.n_row, hipMemcpyDeviceToHost); CUDA_CHECK; // Depth mean (GPU), inpainting (CPU) and smoothing (CPU) std::cout << "Mean of depth values" << std::endl; float* inpaint_mask = new float[dh->z0_h*dh->z0_w]; float* zs = new float[dh->z0_h*dh->z0_w]; uint8_t* inpaint_locations = new uint8_t[dh->z0_h*dh->z0_w]; uint8_t* d_inpaint_locations = NULL; float* d_zs = cuda_based_mean_across_channels(dh->z0, dh->z0_h, dh->z0_w, dh->z0_n, &d_inpaint_locations); hipMemcpy(zs, d_zs, sizeof(float)*dh->z0_h*dh->z0_w, hipMemcpyDeviceToHost); CUDA_CHECK; hipMemcpy(inpaint_locations, d_inpaint_locations, sizeof(uint8_t)*dh->z0_h*dh->z0_w, hipMemcpyDeviceToHost); CUDA_CHECK; hipFree(d_inpaint_locations); CUDA_CHECK; std::cout << "Inpainting depth values" << std::endl; cv::Mat zs_mat((int)dh->z0_w, (int)dh->z0_h, CV_32FC1, zs); cv::Mat zs_out_mat((int)dh->z0_w, (int)dh->z0_h, CV_32FC1); cv::Mat inpaint_locations_mat((int)dh->z0_w, (int)dh->z0_h, CV_8UC1, inpaint_locations); cv::inpaint(zs_mat, inpaint_locations_mat, zs_mat, 16, cv::INPAINT_TELEA); std::cout << "Smoothing depth" << std::endl; double min, max; cv::minMaxIdx(zs_mat, &min, &max); zs_mat = zs_mat / max; cv::bilateralFilter(zs_mat, zs_out_mat, -1, 2, 2); zs_out_mat *= max; hipMemcpy(d_zs, zs_out_mat.data, sizeof(float)*dh->z0_h*dh->z0_w, hipMemcpyHostToDevice); CUDA_CHECK; WRITE_MAT_FROM_DEVICE(d_zs, dh->z0_h*dh->z0_w, "zs_init.mat"); // Upscale of depth to get initial estimate of z (CPU) std::cout << "Resample depths" << std::endl; float* z_full = new float[dh->I_h*dh->I_w]; cv::Mat z_mat((int)dh->I_w, (int)dh->I_h, CV_32FC1, z_full); cv::resize(zs_out_mat, z_mat, cv::Size(dh->I_h, dh->I_w), 0, 0, cv::INTER_CUBIC); // Indices of Mask and Masks (CPU) std::cout << "Mask index calculation" << std::endl; thrust::host_vector<int> imask, imasks; int* index_in_masked_matrix = new int[dh->I_h*dh->I_w]; memset(index_in_masked_matrix, 0, sizeof(int)*dh->I_h*dh->I_w); int ctr = 0; for (int i = 0; i < dh->D.n_col; i++) { if (dh->mask[i] != 0) { imask.push_back(i); index_in_masked_matrix[i] = ctr++; } } for (int i = 0; i < dh->D.n_row; i++) { if (masks[i] != 0) imasks.push_back(i); } int npix = (int)imask.size(); int npixs = (int)imasks.size(); // Calculation of filtered resample matrix that operates only on the masked pixels (CPU) std::cout << "Masked resample matrix" << std::endl; thrust::host_vector<int> KT_row; thrust::host_vector<int> KT_col; thrust::sort(thrust::host, imask.begin(), imask.end()); thrust::sort(thrust::host, imasks.begin(), imasks.end()); for (int i = 0; i < dh->D.n_nz; i++) { thrust::detail::normal_iterator<int*> its = binary_find(imasks.begin(), imasks.end(), dh->D.row[i]); thrust::detail::normal_iterator<int*> it = binary_find(imask.begin(), imask.end(), dh->D.col[i]); if (its != imasks.end() && it != imask.end()) { KT_row.push_back(its - imasks.begin()); KT_col.push_back(it - imask.begin()); } } SparseCOO<float> KT((int)imasks.size(), (int)imask.size(), (int)KT_row.size()); 
memcpy(KT.row, KT_row.data(), KT_row.size() * sizeof(int)); memcpy(KT.col, KT_col.data(), KT_col.size() * sizeof(int)); for (size_t i = 0; i < KT_row.size(); i++) { KT.val[i] = 1.f / (dh->sf*dh->sf); } int* d_KT_row_ptr, *d_KT_col_ind; float* d_KT_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &KT, &d_KT_row_ptr, &d_KT_col_ind, &d_KT_val); KT.freeMemory(); // Create gradient matrices for non square shapes (CPU) std::cout << "Masked gradient matrix" << std::endl; std::pair<SparseCOO<float>, SparseCOO<float>> G = make_gradient(dh->mask, dh->I_h, dh->I_w, index_in_masked_matrix, (int)imask.size()); int* d_Dx_row_ptr, *d_Dx_col_ind, *d_Dy_row_ptr, *d_Dy_col_ind; float* d_Dx_val, *d_Dy_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &G.first, &d_Dx_row_ptr, &d_Dx_col_ind, &d_Dx_val); cuda_based_host_COO_to_device_CSR(cusp_handle, &G.second, &d_Dy_row_ptr, &d_Dy_col_ind, &d_Dy_val); G.first.freeMemory(); G.second.freeMemory(); std::cout << "Initialization" << std::endl; // Lighting (s) initialization (GPU) float* d_s = NULL; hipMalloc(&d_s, dh->I_n * dh->I_c * 4 * sizeof(float)); CUDA_CHECK; hipMemset(d_s, 0, dh->I_c * 4 * dh->I_n * sizeof(float)); CUDA_CHECK; thrust::device_ptr<float> dt_s = thrust::device_pointer_cast(d_s); for (int i = 0; i < dh->I_n; i++) { for (int j = 0; j < dh->I_c; j++) { dt_s[i * 4 * dh->I_c + j * 4 + 2] = -1; } } // Albedo (rho) initialization (GPU) float* d_rho = cuda_based_rho_init(imask, dh->I_c); // Copying masked portion of I (GPU) float* d_I = NULL, *d_I_complete = NULL, *d_mask_extended = NULL; hipMalloc(&d_I, imask.size() * dh->I_c * dh->I_n * sizeof(float)); CUDA_CHECK; hipMalloc(&d_I_complete, dh->I_w * dh->I_h * dh->I_c * sizeof(float)); CUDA_CHECK; hipMalloc(&d_mask_extended, dh->I_w * dh->I_h * dh->I_c * sizeof(float)); CUDA_CHECK; for (int n = 0; n < dh->I_n; n++) { for (int i = 0; i < dh->I_c; i++) hipMemcpy(d_mask_extended + dh->I_w * dh->I_h * i, d_mask, dh->I_w * dh->I_h * sizeof(float), hipMemcpyDeviceToDevice); CUDA_CHECK; hipMemcpy(d_I_complete, dh->I + n * dh->I_w * dh->I_h * dh->I_c, dh->I_w * dh->I_h * dh->I_c * sizeof(float), hipMemcpyHostToDevice); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_I_complete), THRUST_CAST(d_I_complete) + dh->I_c*dh->I_w*dh->I_h, THRUST_CAST(d_mask_extended), THRUST_CAST(d_I) + imask.size() * dh->I_c * n, is_one()); CUDA_CHECK; } hipFree(d_mask_extended); CUDA_CHECK; hipFree(d_I_complete); CUDA_CHECK; // Copying masked portion of LR depth (GPU) float *d_z0s = NULL; hipMalloc(&d_z0s, sizeof(float)*imasks.size()); CUDA_CHECK;; thrust::copy_if(thrust::device, THRUST_CAST(d_zs), THRUST_CAST(d_zs) + dh->z0_h*dh->z0_w, THRUST_CAST(d_masks), THRUST_CAST(d_z0s), is_one()); CUDA_CHECK; // Copying masked portion of initial HR depth (GPU) float* d_z = NULL, *d_z_full = NULL; hipMalloc(&d_z, sizeof(float)*imask.size()); CUDA_CHECK; hipMalloc(&d_z_full, sizeof(float)*dh->I_h*dh->I_w); CUDA_CHECK; hipMemcpy(d_z_full, z_full, sizeof(float)*dh->I_h*dh->I_w, hipMemcpyHostToDevice); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_z_full), THRUST_CAST(d_z_full) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_z), is_one()); CUDA_CHECK; hipFree(d_z_full); hipFree(d_zs); CUDA_CHECK; WRITE_MAT_FROM_DEVICE(d_z, imask.size(), "z_init.mat"); // Meshgrid for normal estimation (GPU) float* d_xx = NULL, *d_yy = NULL; hipMalloc(&d_xx, sizeof(float)*imask.size()); CUDA_CHECK; hipMalloc(&d_yy, sizeof(float)*imask.size()); CUDA_CHECK; std::pair<float*, float*> d_meshgrid = cuda_based_meshgrid_create(dh->I_w, 
dh->I_h, dh->K[6], dh->K[7]); thrust::copy_if(thrust::device, THRUST_CAST(d_meshgrid.first), THRUST_CAST(d_meshgrid.first) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_xx), is_one()); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_meshgrid.second), THRUST_CAST(d_meshgrid.second) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_yy), is_one()); CUDA_CHECK; hipFree(d_meshgrid.first); hipFree(d_meshgrid.second); // zx and zy for normal estimation (GPU) float *d_zx = NULL, *d_zy = NULL; d_zx = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_z); d_zy = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_z); // Normal initialization (GPU) float* d_dz = NULL; float* d_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); float* d_init_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); // Core algorithm (GPU) float last_error = NAN; bool stop_loop = false; int iteration = 1; do { Timer timer; timer.start(); // Lighting estimation (GPU) cuda_based_lightning_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("\n%-25s: %-6.6fs\n", "Lightning Estimation", timer.get()); timer.start(); // Albedo estimation (GPU) cuda_based_albedo_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("%-25s: %-6.6fs\n", "Albedo Estimation", timer.get()); timer.start(); // Depth estimation (GPU) float error = cuda_based_depth_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, d_xx, d_yy, d_dz, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_KT_row_ptr, d_KT_col_ind, d_KT_val, KT.n_row, KT.n_col, KT.n_nz, d_z0s, d_z, dh->K[0], dh->K[4], (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("%-25s: %-6.6fs\n", "Depth Estimation", timer.get()); // Terminating conditions float rel_err = fabs(last_error - error) / fabs(error); if (error > last_error || rel_err < TOLERANCE || iteration > MAX_ITERATIONS) { stop_loop = true; } last_error = error; printf("\nIteration %02d summary\n", iteration); printf("%-25s: %-6.3f\n", "Error", error); printf("%-25s: %-6.3f\n", "Relative Error",rel_err); hipFree(d_zx); CUDA_CHECK; hipFree(d_zy); CUDA_CHECK; // Recalculate normals (GPU) d_zx = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_z); d_zy = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_z); hipFree(d_dz); CUDA_CHECK; hipFree(d_N); CUDA_CHECK; d_dz = NULL; d_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); iteration++; // Visualizations float scale = 0.425f; cv::imshow("Normals-Initial", N_as_opencv_mat(d_init_N, imask, dh->I_h, dh->I_w, scale)); cv::moveWindow("Normals-Initial", 10, 10); cv::imshow("Normals-Current-Iteration", N_as_opencv_mat(d_N, imask, dh->I_h, dh->I_w, scale)); cv::moveWindow("Normals-Current-Iteration", (int)(30 + dh->I_h * scale), 10); cv::imshow("Albedo", rho_as_opencv_mat(d_rho, imask, dh->I_h, 
dh->I_w, dh->I_c, scale)); cv::moveWindow("Albedo", (int)(30 + 2 * dh->I_h * scale),10); cv::waitKey(5); // Dump as MAT files WRITE_MAT_FROM_DEVICE(d_s, dh->I_n * dh->I_c * 4, "s.mat"); WRITE_MAT_FROM_DEVICE(d_rho, imask.size() * dh->I_c, "rho.mat"); WRITE_MAT_FROM_DEVICE(d_z, imask.size(), "z.mat"); WRITE_MAT_FROM_DEVICE(d_N, imask.size() * 4, "N.mat"); } while (!stop_loop); std::cout << "Done!" << std::endl; cv::waitKey(0); if (hipsparseDestroy(cusp_handle) != HIPSPARSE_STATUS_SUCCESS) { throw std::runtime_error("CUSPARSE Library release of resources failed"); } if (hipblasDestroy(cublas_handle) != HIPBLAS_STATUS_SUCCESS) { throw std::runtime_error("CUBLAS Library release of resources failed"); } dh->D.freeMemory(); hipFree(d_D_col_ind); CUDA_CHECK; hipFree(d_D_row_ptr); CUDA_CHECK; hipFree(d_D_val); CUDA_CHECK; hipFree(d_Dx_col_ind); CUDA_CHECK; hipFree(d_Dx_row_ptr); CUDA_CHECK; hipFree(d_Dx_val); CUDA_CHECK; hipFree(d_Dy_col_ind); CUDA_CHECK; hipFree(d_Dy_row_ptr); CUDA_CHECK; hipFree(d_Dy_val); CUDA_CHECK; hipFree(d_mask); CUDA_CHECK; hipFree(d_N); CUDA_CHECK; hipFree(d_init_N); CUDA_CHECK; hipFree(d_z); CUDA_CHECK; hipFree(d_dz); CUDA_CHECK; hipFree(d_z0s); CUDA_CHECK; hipFree(d_masks); CUDA_CHECK; hipFree(d_I); CUDA_CHECK; delete[] inpaint_mask; delete[] masks; delete[] inpaint_locations; delete[] zs; delete[] z_full; }
01c6abf0debef74548dc8dd1aa4d11706bd6d02c.cu
#include "SRPS.h" SRPS::SRPS(DataHandler& dh) { this->dh = &dh; } SRPS::~SRPS() {} template <typename T> void set_sparse_matrix_for_gradient(SparseCOO<T>& D, thrust::host_vector<int>& ic, thrust::host_vector<int>& ir, float k1, float k2) { memcpy(D.row, ic.data(), sizeof(int)*ic.size()); memcpy(D.row + ic.size(), ic.data(), sizeof(int)*ic.size()); memcpy(D.col, ir.data(), sizeof(int)*ir.size()); memcpy(D.col + ir.size(), ic.data(), sizeof(int)*ic.size()); for (size_t i = 0; i < ic.size(); i++) { D.val[i] = k1; } for (size_t i = ic.size(); i < 2 * ic.size(); i++) { D.val[i] = k2; } } std::pair<SparseCOO<float>, SparseCOO<float>> make_gradient(float* mask, int h, int w, int* index_in_masked_matrix, int mask_size) { thrust::host_vector<int> ic_top, ir_top; thrust::host_vector<int> ic_left, ir_left; thrust::host_vector<int> ic_right, ir_right; thrust::host_vector<int> ic_bottom, ir_bottom; for (int j = 0; j < w; j++) { for (int i = 0; i < h; i++) { if (i + 1 < h && mask[i + j * h] != 0 && mask[i + 1 + j * h] != 0) { ic_bottom.push_back(index_in_masked_matrix[i + j * h]); ir_bottom.push_back(index_in_masked_matrix[i + 1 + j * h]); } else if (i - 1 >= 0 && mask[i + j * h] != 0 && mask[i - 1 + j * h] != 0) { ic_top.push_back(index_in_masked_matrix[i + j * h]); ir_top.push_back(index_in_masked_matrix[i - 1 + j * h]); } if (j + 1 < w && mask[i + j * h] != 0 && mask[i + (j + 1) * h] != 0) { ic_right.push_back(index_in_masked_matrix[i + j * h]); ir_right.push_back(index_in_masked_matrix[i + (j + 1) * h]); } else if (j - 1 >= 0 && mask[i + j * h] != 0 && mask[i + (j - 1) * h] != 0) { ic_left.push_back(index_in_masked_matrix[i + j * h]); ir_left.push_back(index_in_masked_matrix[i + (j - 1) * h]); } } } SparseCOO<float> Dxp(mask_size, mask_size, (int)ic_right.size() * 2); set_sparse_matrix_for_gradient<float>(Dxp, ic_right, ir_right, 1, -1); SparseCOO<float> Dxn(mask_size, mask_size, (int)ic_left.size() * 2); set_sparse_matrix_for_gradient<float>(Dxn, ic_left, ir_left, -1, 1); SparseCOO<float> Dyp(mask_size, mask_size, (int)ic_bottom.size() * 2); set_sparse_matrix_for_gradient<float>(Dyp, ic_bottom, ir_bottom, 1, -1); SparseCOO<float> Dyn(mask_size, mask_size, (int)ic_top.size() * 2); set_sparse_matrix_for_gradient<float>(Dyn, ic_top, ir_top, -1, 1); SparseCOO<float> Dx = Dxp + Dxn; SparseCOO<float> Dy = Dyp + Dyn; Dxp.freeMemory(); Dyp.freeMemory(); Dxn.freeMemory(); Dyn.freeMemory(); return std::pair<SparseCOO<float>, SparseCOO<float>>(Dx, Dy); } template<class Iter, class T> Iter binary_find(Iter begin, Iter end, T val) { // Finds the lower bound in at most log(last - first) + 1 comparisons Iter i = std::lower_bound(begin, end, val); if (i != end && !(val < *i)) return i; // found else return end; // not found } void SRPS::execute() { float TOLERANCE = 5e-3f; int MAX_ITERATIONS = 10; cudaSetDevice(Preferences::deviceId); // Initialize CUBLAS/CUSPARSE cusparseHandle_t cusp_handle = 0; cublasHandle_t cublas_handle = 0; if (cusparseCreate(&cusp_handle) != CUSPARSE_STATUS_SUCCESS) { throw std::runtime_error("CUSPARSE Library initialization failed"); } if (cublasCreate(&cublas_handle) != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("CUBLAS Library initialization failed"); } // Move downsampling Matrix to device in CSR format int* d_D_row_ptr, *d_D_col_ind; float* d_D_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &dh->D, &d_D_row_ptr, &d_D_col_ind, &d_D_val); // Create mask for the downsampled image (GPU) std::cout << "Small mask calculation" << std::endl; float* d_mask = NULL; 
cudaMalloc(&d_mask, sizeof(float) * dh->I_h * dh->I_w); cudaMemcpy(d_mask, dh->mask, sizeof(float) * dh->I_h * dh->I_w, cudaMemcpyHostToDevice); float* d_masks = cuda_based_sparsemat_densevec_mul(cusp_handle, d_D_row_ptr, d_D_col_ind, d_D_val, dh->D.n_row, dh->D.n_col, dh->D.n_nz, d_mask); thrust::replace_if(THRUST_CAST(d_masks), THRUST_CAST(d_masks) + dh->D.n_row, is_less_than_one(), 0.f); // Copy it back to host for imasks calculation float* masks = new float[dh->D.n_row]; cudaMemcpy(masks, d_masks, sizeof(float)*dh->D.n_row, cudaMemcpyDeviceToHost); CUDA_CHECK; // Depth mean (GPU), inpainting (CPU) and smoothing (CPU) std::cout << "Mean of depth values" << std::endl; float* inpaint_mask = new float[dh->z0_h*dh->z0_w]; float* zs = new float[dh->z0_h*dh->z0_w]; uint8_t* inpaint_locations = new uint8_t[dh->z0_h*dh->z0_w]; uint8_t* d_inpaint_locations = NULL; float* d_zs = cuda_based_mean_across_channels(dh->z0, dh->z0_h, dh->z0_w, dh->z0_n, &d_inpaint_locations); cudaMemcpy(zs, d_zs, sizeof(float)*dh->z0_h*dh->z0_w, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaMemcpy(inpaint_locations, d_inpaint_locations, sizeof(uint8_t)*dh->z0_h*dh->z0_w, cudaMemcpyDeviceToHost); CUDA_CHECK; cudaFree(d_inpaint_locations); CUDA_CHECK; std::cout << "Inpainting depth values" << std::endl; cv::Mat zs_mat((int)dh->z0_w, (int)dh->z0_h, CV_32FC1, zs); cv::Mat zs_out_mat((int)dh->z0_w, (int)dh->z0_h, CV_32FC1); cv::Mat inpaint_locations_mat((int)dh->z0_w, (int)dh->z0_h, CV_8UC1, inpaint_locations); cv::inpaint(zs_mat, inpaint_locations_mat, zs_mat, 16, cv::INPAINT_TELEA); std::cout << "Smoothing depth" << std::endl; double min, max; cv::minMaxIdx(zs_mat, &min, &max); zs_mat = zs_mat / max; cv::bilateralFilter(zs_mat, zs_out_mat, -1, 2, 2); zs_out_mat *= max; cudaMemcpy(d_zs, zs_out_mat.data, sizeof(float)*dh->z0_h*dh->z0_w, cudaMemcpyHostToDevice); CUDA_CHECK; WRITE_MAT_FROM_DEVICE(d_zs, dh->z0_h*dh->z0_w, "zs_init.mat"); // Upscale of depth to get initial estimate of z (CPU) std::cout << "Resample depths" << std::endl; float* z_full = new float[dh->I_h*dh->I_w]; cv::Mat z_mat((int)dh->I_w, (int)dh->I_h, CV_32FC1, z_full); cv::resize(zs_out_mat, z_mat, cv::Size(dh->I_h, dh->I_w), 0, 0, cv::INTER_CUBIC); // Indices of Mask and Masks (CPU) std::cout << "Mask index calculation" << std::endl; thrust::host_vector<int> imask, imasks; int* index_in_masked_matrix = new int[dh->I_h*dh->I_w]; memset(index_in_masked_matrix, 0, sizeof(int)*dh->I_h*dh->I_w); int ctr = 0; for (int i = 0; i < dh->D.n_col; i++) { if (dh->mask[i] != 0) { imask.push_back(i); index_in_masked_matrix[i] = ctr++; } } for (int i = 0; i < dh->D.n_row; i++) { if (masks[i] != 0) imasks.push_back(i); } int npix = (int)imask.size(); int npixs = (int)imasks.size(); // Calculation of filtered resample matrix that operates only on the masked pixels (CPU) std::cout << "Masked resample matrix" << std::endl; thrust::host_vector<int> KT_row; thrust::host_vector<int> KT_col; thrust::sort(thrust::host, imask.begin(), imask.end()); thrust::sort(thrust::host, imasks.begin(), imasks.end()); for (int i = 0; i < dh->D.n_nz; i++) { thrust::detail::normal_iterator<int*> its = binary_find(imasks.begin(), imasks.end(), dh->D.row[i]); thrust::detail::normal_iterator<int*> it = binary_find(imask.begin(), imask.end(), dh->D.col[i]); if (its != imasks.end() && it != imask.end()) { KT_row.push_back(its - imasks.begin()); KT_col.push_back(it - imask.begin()); } } SparseCOO<float> KT((int)imasks.size(), (int)imask.size(), (int)KT_row.size()); memcpy(KT.row, KT_row.data(), 
KT_row.size() * sizeof(int)); memcpy(KT.col, KT_col.data(), KT_col.size() * sizeof(int)); for (size_t i = 0; i < KT_row.size(); i++) { KT.val[i] = 1.f / (dh->sf*dh->sf); } int* d_KT_row_ptr, *d_KT_col_ind; float* d_KT_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &KT, &d_KT_row_ptr, &d_KT_col_ind, &d_KT_val); KT.freeMemory(); // Create gradient matrices for non square shapes (CPU) std::cout << "Masked gradient matrix" << std::endl; std::pair<SparseCOO<float>, SparseCOO<float>> G = make_gradient(dh->mask, dh->I_h, dh->I_w, index_in_masked_matrix, (int)imask.size()); int* d_Dx_row_ptr, *d_Dx_col_ind, *d_Dy_row_ptr, *d_Dy_col_ind; float* d_Dx_val, *d_Dy_val; cuda_based_host_COO_to_device_CSR(cusp_handle, &G.first, &d_Dx_row_ptr, &d_Dx_col_ind, &d_Dx_val); cuda_based_host_COO_to_device_CSR(cusp_handle, &G.second, &d_Dy_row_ptr, &d_Dy_col_ind, &d_Dy_val); G.first.freeMemory(); G.second.freeMemory(); std::cout << "Initialization" << std::endl; // Lighting (s) initialization (GPU) float* d_s = NULL; cudaMalloc(&d_s, dh->I_n * dh->I_c * 4 * sizeof(float)); CUDA_CHECK; cudaMemset(d_s, 0, dh->I_c * 4 * dh->I_n * sizeof(float)); CUDA_CHECK; thrust::device_ptr<float> dt_s = thrust::device_pointer_cast(d_s); for (int i = 0; i < dh->I_n; i++) { for (int j = 0; j < dh->I_c; j++) { dt_s[i * 4 * dh->I_c + j * 4 + 2] = -1; } } // Albedo (rho) initialization (GPU) float* d_rho = cuda_based_rho_init(imask, dh->I_c); // Copying masked portion of I (GPU) float* d_I = NULL, *d_I_complete = NULL, *d_mask_extended = NULL; cudaMalloc(&d_I, imask.size() * dh->I_c * dh->I_n * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_I_complete, dh->I_w * dh->I_h * dh->I_c * sizeof(float)); CUDA_CHECK; cudaMalloc(&d_mask_extended, dh->I_w * dh->I_h * dh->I_c * sizeof(float)); CUDA_CHECK; for (int n = 0; n < dh->I_n; n++) { for (int i = 0; i < dh->I_c; i++) cudaMemcpy(d_mask_extended + dh->I_w * dh->I_h * i, d_mask, dh->I_w * dh->I_h * sizeof(float), cudaMemcpyDeviceToDevice); CUDA_CHECK; cudaMemcpy(d_I_complete, dh->I + n * dh->I_w * dh->I_h * dh->I_c, dh->I_w * dh->I_h * dh->I_c * sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_I_complete), THRUST_CAST(d_I_complete) + dh->I_c*dh->I_w*dh->I_h, THRUST_CAST(d_mask_extended), THRUST_CAST(d_I) + imask.size() * dh->I_c * n, is_one()); CUDA_CHECK; } cudaFree(d_mask_extended); CUDA_CHECK; cudaFree(d_I_complete); CUDA_CHECK; // Copying masked portion of LR depth (GPU) float *d_z0s = NULL; cudaMalloc(&d_z0s, sizeof(float)*imasks.size()); CUDA_CHECK;; thrust::copy_if(thrust::device, THRUST_CAST(d_zs), THRUST_CAST(d_zs) + dh->z0_h*dh->z0_w, THRUST_CAST(d_masks), THRUST_CAST(d_z0s), is_one()); CUDA_CHECK; // Copying masked portion of initial HR depth (GPU) float* d_z = NULL, *d_z_full = NULL; cudaMalloc(&d_z, sizeof(float)*imask.size()); CUDA_CHECK; cudaMalloc(&d_z_full, sizeof(float)*dh->I_h*dh->I_w); CUDA_CHECK; cudaMemcpy(d_z_full, z_full, sizeof(float)*dh->I_h*dh->I_w, cudaMemcpyHostToDevice); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_z_full), THRUST_CAST(d_z_full) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_z), is_one()); CUDA_CHECK; cudaFree(d_z_full); cudaFree(d_zs); CUDA_CHECK; WRITE_MAT_FROM_DEVICE(d_z, imask.size(), "z_init.mat"); // Meshgrid for normal estimation (GPU) float* d_xx = NULL, *d_yy = NULL; cudaMalloc(&d_xx, sizeof(float)*imask.size()); CUDA_CHECK; cudaMalloc(&d_yy, sizeof(float)*imask.size()); CUDA_CHECK; std::pair<float*, float*> d_meshgrid = cuda_based_meshgrid_create(dh->I_w, dh->I_h, 
dh->K[6], dh->K[7]); thrust::copy_if(thrust::device, THRUST_CAST(d_meshgrid.first), THRUST_CAST(d_meshgrid.first) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_xx), is_one()); CUDA_CHECK; thrust::copy_if(thrust::device, THRUST_CAST(d_meshgrid.second), THRUST_CAST(d_meshgrid.second) + dh->I_w*dh->I_h, THRUST_CAST(d_mask), THRUST_CAST(d_yy), is_one()); CUDA_CHECK; cudaFree(d_meshgrid.first); cudaFree(d_meshgrid.second); // zx and zy for normal estimation (GPU) float *d_zx = NULL, *d_zy = NULL; d_zx = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_z); d_zy = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_z); // Normal initialization (GPU) float* d_dz = NULL; float* d_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); float* d_init_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); // Core algorithm (GPU) float last_error = NAN; bool stop_loop = false; int iteration = 1; do { Timer timer; timer.start(); // Lighting estimation (GPU) cuda_based_lightning_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("\n%-25s: %-6.6fs\n", "Lightning Estimation", timer.get()); timer.start(); // Albedo estimation (GPU) cuda_based_albedo_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("%-25s: %-6.6fs\n", "Albedo Estimation", timer.get()); timer.start(); // Depth estimation (GPU) float error = cuda_based_depth_estimation(cublas_handle, cusp_handle, d_s, d_rho, d_N, d_I, d_xx, d_yy, d_dz, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_KT_row_ptr, d_KT_col_ind, d_KT_val, KT.n_row, KT.n_col, KT.n_nz, d_z0s, d_z, dh->K[0], dh->K[4], (int)imask.size(), dh->I_n, dh->I_c); timer.end(); printf("%-25s: %-6.6fs\n", "Depth Estimation", timer.get()); // Terminating conditions float rel_err = fabs(last_error - error) / fabs(error); if (error > last_error || rel_err < TOLERANCE || iteration > MAX_ITERATIONS) { stop_loop = true; } last_error = error; printf("\nIteration %02d summary\n", iteration); printf("%-25s: %-6.3f\n", "Error", error); printf("%-25s: %-6.3f\n", "Relative Error",rel_err); cudaFree(d_zx); CUDA_CHECK; cudaFree(d_zy); CUDA_CHECK; // Recalculate normals (GPU) d_zx = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dx_row_ptr, d_Dx_col_ind, d_Dx_val, G.first.n_row, G.first.n_col, G.first.n_nz, d_z); d_zy = cuda_based_sparsemat_densevec_mul(cusp_handle, d_Dy_row_ptr, d_Dy_col_ind, d_Dy_val, G.second.n_row, G.second.n_col, G.second.n_nz, d_z); cudaFree(d_dz); CUDA_CHECK; cudaFree(d_N); CUDA_CHECK; d_dz = NULL; d_N = cuda_based_normal_init(cublas_handle, d_z, d_zx, d_zy, d_xx, d_yy, (int)imask.size(), dh->K[0], dh->K[4], &d_dz); iteration++; // Visualizations float scale = 0.425f; cv::imshow("Normals-Initial", N_as_opencv_mat(d_init_N, imask, dh->I_h, dh->I_w, scale)); cv::moveWindow("Normals-Initial", 10, 10); cv::imshow("Normals-Current-Iteration", N_as_opencv_mat(d_N, imask, dh->I_h, dh->I_w, scale)); cv::moveWindow("Normals-Current-Iteration", (int)(30 + dh->I_h * scale), 10); cv::imshow("Albedo", rho_as_opencv_mat(d_rho, imask, dh->I_h, dh->I_w, 
dh->I_c, scale)); cv::moveWindow("Albedo", (int)(30 + 2 * dh->I_h * scale),10); cv::waitKey(5); // Dump as MAT files WRITE_MAT_FROM_DEVICE(d_s, dh->I_n * dh->I_c * 4, "s.mat"); WRITE_MAT_FROM_DEVICE(d_rho, imask.size() * dh->I_c, "rho.mat"); WRITE_MAT_FROM_DEVICE(d_z, imask.size(), "z.mat"); WRITE_MAT_FROM_DEVICE(d_N, imask.size() * 4, "N.mat"); } while (!stop_loop); std::cout << "Done!" << std::endl; cv::waitKey(0); if (cusparseDestroy(cusp_handle) != CUSPARSE_STATUS_SUCCESS) { throw std::runtime_error("CUSPARSE Library release of resources failed"); } if (cublasDestroy(cublas_handle) != CUBLAS_STATUS_SUCCESS) { throw std::runtime_error("CUBLAS Library release of resources failed"); } dh->D.freeMemory(); cudaFree(d_D_col_ind); CUDA_CHECK; cudaFree(d_D_row_ptr); CUDA_CHECK; cudaFree(d_D_val); CUDA_CHECK; cudaFree(d_Dx_col_ind); CUDA_CHECK; cudaFree(d_Dx_row_ptr); CUDA_CHECK; cudaFree(d_Dx_val); CUDA_CHECK; cudaFree(d_Dy_col_ind); CUDA_CHECK; cudaFree(d_Dy_row_ptr); CUDA_CHECK; cudaFree(d_Dy_val); CUDA_CHECK; cudaFree(d_mask); CUDA_CHECK; cudaFree(d_N); CUDA_CHECK; cudaFree(d_init_N); CUDA_CHECK; cudaFree(d_z); CUDA_CHECK; cudaFree(d_dz); CUDA_CHECK; cudaFree(d_z0s); CUDA_CHECK; cudaFree(d_masks); CUDA_CHECK; cudaFree(d_I); CUDA_CHECK; delete[] inpaint_mask; delete[] masks; delete[] inpaint_locations; delete[] zs; delete[] z_full; }
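To spell out the sparse gradient construction in make_gradient above (this restates what set_sparse_matrix_for_gradient emits, it adds no new behaviour): for a masked pixel p whose right neighbour q is also inside the mask, Dxp receives the COO triplets (p, q, +1) and (p, p, -1), so row p of Dx computes the forward difference z[q] - z[p]; a pixel with no valid right neighbour but a valid left neighbour r instead contributes (p, r, -1) and (p, p, +1) through Dxn, i.e. the backward difference z[p] - z[r]. Dy is assembled the same way from the bottom/top neighbours, and both matrices are then converted to CSR on the device for the sparse matrix-vector products that produce zx and zy.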
5804a8ff50523c5bbaa6bde68555a6dbfeb00d84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "IndiceTools.h" #include "DomaineMath.h" #include "cudaTools.h" #include "Device.h" #include "NewtonMath.h" #include "JuliaMath.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n) { float epsilon = 0.1f; NewtonMath newtonMath = NewtonMath(epsilon, n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) newtonMath.colorXY(&color, x, y); // update color ptrDevPixels[s] = color; s += NB_THREAD; } // must be present ! //delete newtonMath; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
5804a8ff50523c5bbaa6bde68555a6dbfeb00d84.cu
#include "Indice2D.h" #include "IndiceTools.h" #include "DomaineMath.h" #include "cudaTools.h" #include "Device.h" #include "NewtonMath.h" #include "JuliaMath.h" /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __global__ void newton(uchar4* ptrDevPixels, int w, int h, DomaineMath domaineMath, int n) { float epsilon = 0.1f; NewtonMath newtonMath = NewtonMath(epsilon, n); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; double x; double y; int pixelI; int pixelJ; int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) // (i,j) domaine ecran // (x,y) domaine math domaineMath.toXY(pixelI, pixelJ, &x, &y); // (i,j) -> (x,y) newtonMath.colorXY(&color, x, y); // update color ptrDevPixels[s] = color; s += NB_THREAD; } // must be present ! //delete newtonMath; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
94d2965a1f758973dc5966a059d8ebfd4303e865.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { //int index = 0; //int stride = 1; int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; // 1M elements //variable definition on system memory //float *x = new float[N]; //float *y = new float[N]; float *x, *y; //variable allocation on GPU memory hipMallocManaged (&x, N*sizeof(float)); hipMallocManaged (&y, N* sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the CPU //add(N, x, y); // Run kernel on 1M parallel elements on the GPU hipLaunchKernelGGL(( add), dim3(1),dim3(256), 0, 0, N, x, y); // wait for the GPU to finish before accessing the results hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory //delete [] x; //delete [] y; // Free GPU memory hipFree(x); hipFree(y); return 0; }
94d2965a1f758973dc5966a059d8ebfd4303e865.cu
#include <iostream> #include <math.h> // function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { //int index = 0; //int stride = 1; int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; // 1M elements //variable definition on system memory //float *x = new float[N]; //float *y = new float[N]; float *x, *y; //variable allocation on GPU memory cudaMallocManaged (&x, N*sizeof(float)); cudaMallocManaged (&y, N* sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the CPU //add(N, x, y); // Run kernel on 1M parallel elements on the GPU add<<<1,256>>>(N, x, y); // wait for the GPU to finish before accessing the results cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory //delete [] x; //delete [] y; // Free GPU memory cudaFree(x); cudaFree(y); return 0; }
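The pair above launches a single block (add<<<1,256>>>), so the loop stride only spans 256 threads. A minimal sketch of the usual next step, a full grid-stride launch sized to the problem; the kernel name addGrid and the numBlocks arithmetic are illustrative and not part of the original files:

#include <iostream>
#include <math.h>

// same unified-memory vector add, but with a grid-wide stride
__global__ void addGrid(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;   // stride covers the whole grid, not one block
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1 << 20;
  float *x, *y;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; }

  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;   // enough blocks to cover N elements
  addGrid<<<numBlocks, blockSize>>>(N, x, y);
  cudaDeviceSynchronize();

  float maxError = 0.0f;
  for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  cudaFree(x);
  cudaFree(y);
  return 0;
}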
34030429651195400e0f08733bce69fa6466f8cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * The kernel is assumed to be tuned to each device by selecting * the best performing combination of thread block dimensions * and tiling factors in X and Y. In this implementation tiling * in X increases the amount of work per thread block and tiling * in Y increases the amount of work per thread within the block. * * WARNING: THIS KERNEL IS FOR EDUCATIONAL PURPOSES ONLY. * PLEASE *DO NOT USE IT* IN PRODUCTION, USE A BLAS * LIBRARY SUCH AS CUBLAS, CLBLAST OR CUTLASS INSTEAD. * * @author Ben van Werkhoven <[email protected]> * */ #define WIDTH 4096 /* * Optimized CUDA kernel for matrix multiplication * * This kernel is optimized according to the directions given * in: "Better performance at lower occupancy" by V. Volkov, * GPU Technology Conference, GTC 2010. * * The thread block dimensions (block_size_x, block_size_y) * and tiling factors (tile_size_x, tile_size_y) are to be * tuned towards each GPU. This kernel assumes that * block_size_x = block_size_y * tile_size_y. * * The kernel computes C=A*B, where A, B, and C are square * matrices with height and width equal to WIDTH */ #if blocks_per_sm > 0 __launch_bounds__(block_size_x* block_size_y, blocks_per_sm) #endif __global__ void matmul_kernel(float* C, float* A, float* B) { __shared__ float sA[block_size_y * tile_size_y][block_size_x]; __shared__ float sB[block_size_y * tile_size_y][block_size_x * tile_size_x]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x; int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y; int k, kb; float sum[tile_size_y][tile_size_x]; #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { sum[i][j] = 0.0f; } } for (k = 0; k < WIDTH; k += block_size_x) { __syncthreads(); #pragma unroll for (int i = 0; i < tile_size_y; i++) { sA[ty + block_size_y * i][tx] = A[(y + i * block_size_y) * WIDTH + k + tx]; #pragma unroll for (int j = 0; j < tile_size_x; j++) { sB[ty + block_size_y * i][tx + j * block_size_x] = B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x]; } } __syncthreads(); //compute #pragma unroll for (kb = 0; kb < block_size_x; kb++) { #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x]; } } } } //store result #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j]; } } }
34030429651195400e0f08733bce69fa6466f8cb.cu
/** * The kernel is assumed to be tuned to each device by selecting * the best performing combination of thread block dimensions * and tiling factors in X and Y. In this implementation tiling * in X increases the amount of work per thread block and tiling * in Y increases the amount of work per thread within the block. * * WARNING: THIS KERNEL IS FOR EDUCATIONAL PURPOSES ONLY. * PLEASE *DO NOT USE IT* IN PRODUCTION, USE A BLAS * LIBRARY SUCH AS CUBLAS, CLBLAST OR CUTLASS INSTEAD. * * @author Ben van Werkhoven <[email protected]> * */ #define WIDTH 4096 /* * Optimized CUDA kernel for matrix multiplication * * This kernel is optimized according to the directions given * in: "Better performance at lower occupancy" by V. Volkov, * GPU Technology Conference, GTC 2010. * * The thread block dimensions (block_size_x, block_size_y) * and tiling factors (tile_size_x, tile_size_y) are to be * tuned towards each GPU. This kernel assumes that * block_size_x = block_size_y * tile_size_y. * * The kernel computes C=A*B, where A, B, and C are square * matrices with height and width equal to WIDTH */ #if blocks_per_sm > 0 __launch_bounds__(block_size_x* block_size_y, blocks_per_sm) #endif __global__ void matmul_kernel(float* C, float* A, float* B) { __shared__ float sA[block_size_y * tile_size_y][block_size_x]; __shared__ float sB[block_size_y * tile_size_y][block_size_x * tile_size_x]; int tx = threadIdx.x; int ty = threadIdx.y; int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x; int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y; int k, kb; float sum[tile_size_y][tile_size_x]; #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { sum[i][j] = 0.0f; } } for (k = 0; k < WIDTH; k += block_size_x) { __syncthreads(); #pragma unroll for (int i = 0; i < tile_size_y; i++) { sA[ty + block_size_y * i][tx] = A[(y + i * block_size_y) * WIDTH + k + tx]; #pragma unroll for (int j = 0; j < tile_size_x; j++) { sB[ty + block_size_y * i][tx + j * block_size_x] = B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x]; } } __syncthreads(); //compute #pragma unroll for (kb = 0; kb < block_size_x; kb++) { #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x]; } } } } //store result #pragma unroll for (int i = 0; i < tile_size_y; i++) { #pragma unroll for (int j = 0; j < tile_size_x; j++) { C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j]; } } }
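The matmul files above leave block_size_x, block_size_y, tile_size_x, tile_size_y and blocks_per_sm to be supplied by a tuner, typically as -D compile-time defines. A hedged host-side sketch of one concrete, consistent choice and the matching launch geometry; the values and the launch_matmul helper are illustrative only, not taken from the original pair:

#define block_size_x 32    // one warp wide
#define block_size_y 8
#define tile_size_x  4     // each thread computes 4 columns...
#define tile_size_y  4     // ...and 4 rows, so block_size_x == block_size_y * tile_size_y holds
#define blocks_per_sm 0    // leaves __launch_bounds__ disabled

// ... matmul_kernel exactly as defined in the file above ...

void launch_matmul(float* d_C, float* d_A, float* d_B)
{
  dim3 block(block_size_x, block_size_y);                 // 32 x 8 = 256 threads per block
  dim3 grid(WIDTH / (block_size_x * tile_size_x),         // each block covers 128 columns of C
            WIDTH / (block_size_y * tile_size_y));        // and 32 rows of C
  matmul_kernel<<<grid, block>>>(d_C, d_A, d_B);
}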
2efc9cc1a926d0c0aa75069b44b5f292cc41bf71.hip
// !!! This is a file automatically generated by hipify!!! /** * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 #define GPU_DEVICE 0 /* Problem size */ #define tmax 1 #define NX 2048 #define NY 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE) i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX; ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX; hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX; } } } void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int t, i, j; for (t=0; t < tmax; t++) { for (j=0; j < NY; j++) { ey[0*NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]); } } } } void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2) { int i, j, fail; fail = 0; for (i=0; i < NX; i++) { for (j=0; j < NY; j++) { if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { if (i == 0) { ey[i * NY + j] = _fict_[t]; } else { ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]); } } } __global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY) && (j > 0)) { ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]); } } __global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { hz[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu) { double t_start, t_end; DATA_TYPE *_fict_gpu; DATA_TYPE 
*ex_gpu; DATA_TYPE *ey_gpu; DATA_TYPE *hz_gpu; hipMalloc((void **)&_fict_gpu, sizeof(DATA_TYPE) * tmax); hipMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1)); hipMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY); hipMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY); hipMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax, hipMemcpyHostToDevice); hipMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), hipMemcpyHostToDevice); hipMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, hipMemcpyHostToDevice); hipMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)ceil(((float)NX) / ((float)block.y))); t_start = rtclock(); for(int t = 0; t< tmax; t++) { hipLaunchKernelGGL(( fdtd_step1_kernel), dim3(grid),dim3(block), 0, 0, _fict_gpu, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); hipLaunchKernelGGL(( fdtd_step2_kernel), dim3(grid),dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); hipLaunchKernelGGL(( fdtd_step3_kernel), dim3(grid),dim3(block), 0, 0, ex_gpu, ey_gpu, hz_gpu, t); hipDeviceSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); hipMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY, hipMemcpyDeviceToHost); hipFree(_fict_gpu); hipFree(ex_gpu); hipFree(ey_gpu); hipFree(hz_gpu); } int main(int argc, char *argv[]) { //double t_start, t_end; DATA_TYPE* _fict_; DATA_TYPE* ex; DATA_TYPE* ey; DATA_TYPE* hz; DATA_TYPE* hz_outputFromGpu; _fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE)); ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE)); ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE)); hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); init_arrays(_fict_, ex, ey, hz); //GPU_argv_init(); fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu); //t_start = rtclock(); //runFdtd(_fict_, ex, ey, hz); //t_end = rtclock(); //fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //compareResults(hz, hz_outputFromGpu); free(_fict_); free(ex); free(ey); free(hz); free(hz_outputFromGpu); return 0; }
2efc9cc1a926d0c0aa75069b44b5f292cc41bf71.cu
/** * fdtd2d.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #include "polybenchUtilFuncts.h" //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 10.05 #define GPU_DEVICE 0 /* Problem size */ #define tmax 1 #define NX 2048 #define NY 2048 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_arrays(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int i, j; for (i = 0; i < tmax; i++) { _fict_[i] = (DATA_TYPE) i; } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { ex[i*NY + j] = ((DATA_TYPE) i*(j+1) + 1) / NX; ey[i*NY + j] = ((DATA_TYPE) (i-1)*(j+2) + 2) / NX; hz[i*NY + j] = ((DATA_TYPE) (i-9)*(j+4) + 3) / NX; } } } void runFdtd(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz) { int t, i, j; for (t=0; t < tmax; t++) { for (j=0; j < NY; j++) { ey[0*NY + j] = _fict_[t]; } for (i = 1; i < NX; i++) { for (j = 0; j < NY; j++) { ey[i*NY + j] = ey[i*NY + j] - 0.5*(hz[i*NY + j] - hz[(i-1)*NY + j]); } } for (i = 0; i < NX; i++) { for (j = 1; j < NY; j++) { ex[i*(NY+1) + j] = ex[i*(NY+1) + j] - 0.5*(hz[i*NY + j] - hz[i*NY + (j-1)]); } } for (i = 0; i < NX; i++) { for (j = 0; j < NY; j++) { hz[i*NY + j] = hz[i*NY + j] - 0.7*(ex[i*(NY+1) + (j+1)] - ex[i*(NY+1) + j] + ey[(i+1)*NY + j] - ey[i*NY + j]); } } } } void compareResults(DATA_TYPE* hz1, DATA_TYPE* hz2) { int i, j, fail; fail = 0; for (i=0; i < NX; i++) { for (j=0; j < NY; j++) { if (percentDiff(hz1[i*NY + j], hz2[i*NY + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void fdtd_step1_kernel(DATA_TYPE* _fict_, DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { if (i == 0) { ey[i * NY + j] = _fict_[t]; } else { ey[i * NY + j] = ey[i * NY + j] - 0.5f*(hz[i * NY + j] - hz[(i-1) * NY + j]); } } } __global__ void fdtd_step2_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY) && (j > 0)) { ex[i * (NY+1) + j] = ex[i * (NY+1) + j] - 0.5f*(hz[i * NY + j] - hz[i * NY + (j-1)]); } } __global__ void fdtd_step3_kernel(DATA_TYPE *ex, DATA_TYPE *ey, DATA_TYPE *hz, int t) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NX) && (j < NY)) { hz[i * NY + j] = hz[i * NY + j] - 0.7f*(ex[i * (NY+1) + (j+1)] - ex[i * (NY+1) + j] + ey[(i + 1) * NY + j] - ey[i * NY + j]); } } void fdtdCuda(DATA_TYPE* _fict_, DATA_TYPE* ex, DATA_TYPE* ey, DATA_TYPE* hz, DATA_TYPE* hz_outputFromGpu) { double t_start, t_end; DATA_TYPE *_fict_gpu; DATA_TYPE *ex_gpu; DATA_TYPE *ey_gpu; DATA_TYPE *hz_gpu; cudaMalloc((void 
**)&_fict_gpu, sizeof(DATA_TYPE) * tmax); cudaMalloc((void **)&ex_gpu, sizeof(DATA_TYPE) * NX * (NY + 1)); cudaMalloc((void **)&ey_gpu, sizeof(DATA_TYPE) * (NX + 1) * NY); cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY); cudaMemcpy(_fict_gpu, _fict_, sizeof(DATA_TYPE) * tmax, cudaMemcpyHostToDevice); cudaMemcpy(ex_gpu, ex, sizeof(DATA_TYPE) * NX * (NY + 1), cudaMemcpyHostToDevice); cudaMemcpy(ey_gpu, ey, sizeof(DATA_TYPE) * (NX + 1) * NY, cudaMemcpyHostToDevice); cudaMemcpy(hz_gpu, hz, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid( (size_t)ceil(((float)NY) / ((float)block.x)), (size_t)ceil(((float)NX) / ((float)block.y))); t_start = rtclock(); for(int t = 0; t< tmax; t++) { fdtd_step1_kernel<<<grid,block>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); fdtd_step2_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); fdtd_step3_kernel<<<grid,block>>>(ex_gpu, ey_gpu, hz_gpu, t); cudaThreadSynchronize(); } t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); cudaMemcpy(hz_outputFromGpu, hz_gpu, sizeof(DATA_TYPE) * NX * NY, cudaMemcpyDeviceToHost); cudaFree(_fict_gpu); cudaFree(ex_gpu); cudaFree(ey_gpu); cudaFree(hz_gpu); } int main(int argc, char *argv[]) { //double t_start, t_end; DATA_TYPE* _fict_; DATA_TYPE* ex; DATA_TYPE* ey; DATA_TYPE* hz; DATA_TYPE* hz_outputFromGpu; _fict_ = (DATA_TYPE*)malloc(tmax*sizeof(DATA_TYPE)); ex = (DATA_TYPE*)malloc(NX*(NY+1)*sizeof(DATA_TYPE)); ey = (DATA_TYPE*)malloc((NX+1)*NY*sizeof(DATA_TYPE)); hz = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); hz_outputFromGpu = (DATA_TYPE*)malloc(NX*NY*sizeof(DATA_TYPE)); init_arrays(_fict_, ex, ey, hz); //GPU_argv_init(); fdtdCuda(_fict_, ex, ey, hz, hz_outputFromGpu); //t_start = rtclock(); //runFdtd(_fict_, ex, ey, hz); //t_end = rtclock(); //fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //compareResults(hz, hz_outputFromGpu); free(_fict_); free(ex); free(ey); free(hz); free(hz_outputFromGpu); return 0; }
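The fdtd2d pair above checks no return codes, so a failed allocation, copy, or kernel launch silently yields a wrong hz. A minimal error-checking sketch that the calls in fdtdCuda() could be wrapped in; the CUDA_CHECK name is an illustrative assumption, not part of the PolyBench sources:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// abort with file/line context whenever a CUDA runtime call fails
#define CUDA_CHECK(call)                                                    \
  do {                                                                      \
    cudaError_t err_ = (call);                                              \
    if (err_ != cudaSuccess) {                                              \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                           \
              cudaGetErrorString(err_), __FILE__, __LINE__);                \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

// Example usage around the calls in fdtdCuda():
//   CUDA_CHECK(cudaMalloc((void **)&hz_gpu, sizeof(DATA_TYPE) * NX * NY));
//   fdtd_step1_kernel<<<grid, block>>>(_fict_gpu, ex_gpu, ey_gpu, hz_gpu, t);
//   CUDA_CHECK(cudaGetLastError());
//   CUDA_CHECK(cudaDeviceSynchronize());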
93d641e7f21badc234b54a68c851d0124f34ae12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2010-2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Tuning tool for establishing optimal scan granularity configuration types ******************************************************************************/ #include <stdio.h> #include <map> #include <vector> #include <b40c/util/arch_dispatch.cuh> #include <b40c/util/cuda_properties.cuh> #include <b40c/util/numeric_traits.cuh> #include <b40c/util/parameter_generation.cuh> #include <b40c/util/enactor_base.cuh> #include <b40c/util/spine.cuh> #include <b40c/scan/problem_type.cuh> #include <b40c/scan/policy.cuh> // Test utils #include "b40c_test_util.h" using namespace b40c; /****************************************************************************** * Defines, constants, globals, and utility types ******************************************************************************/ #ifndef TUNE_ARCH #define TUNE_ARCH (200) #endif #ifndef TUNE_SIZE #define TUNE_SIZE (4) #endif bool g_verbose; int g_max_ctas = 0; int g_iterations = 0; bool g_verify; int g_policy_id = 0; struct KernelDetails { int threads; int tile_elements; KernelDetails( int threads, int tile_elements) : threads(threads), tile_elements(tile_elements) {} }; struct PassDetails { int uniform_smem_allocation; int uniform_grid_size; int over_subscribed; // Factory initializer PassDetails ( int uniform_smem_allocation, int uniform_grid_size, int over_subscribed) : uniform_smem_allocation(uniform_smem_allocation), uniform_grid_size(uniform_grid_size), over_subscribed(over_subscribed) {} // CSV string format std::string TypeString() { char buffer[1024]; sprintf(buffer, "%s, %s, %s", uniform_smem_allocation ? "true" : "false", uniform_grid_size ? 
"true" : "false", over_subscribed ? "true" : "false"); return buffer; } }; /****************************************************************************** * Test wrappers for binary, associative operations ******************************************************************************/ template <typename T> struct Sum { // Binary reduction __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) { return a + b; } // Identity __host__ __device__ __forceinline__ T operator()() { return 0; } static const bool IS_COMMUTATIVE = true; }; template <typename T> struct Max { // Binary reduction __host__ __device__ __forceinline__ T Op(const T &a, const T &b) { return (a > b) ? a : b; } // Identity __host__ __device__ __forceinline__ T operator()() { return 0; } static const bool IS_COMMUTATIVE = true; }; /****************************************************************************** * Utility routines ******************************************************************************/ /** * Displays the commandline usage for this tool */ void Usage() { printf("\ntune_scan [--device=<device index>] [--v] [--i=<num-iterations>] " "[--max-ctas=<max-thread-blocks>] [--n=<num-words>] [--verify]\n"); printf("\n"); printf("\t--v\tDisplays verbose configuration to the console.\n"); printf("\n"); printf("\t--verify\tChecks the result.\n"); printf("\n"); printf("\t--i\tPerforms the operation <num-iterations> times\n"); printf("\t\t\ton the device. Default = 1\n"); printf("\n"); printf("\t--n\tThe number of 32-bit words to comprise the sample problem\n"); printf("\n"); printf("\t--max-ctas\tThe number of CTAs to launch\n"); printf("\n"); } /****************************************************************************** * Upsweep Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct UpsweepTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, LOG_SCHEDULE_GRANULARITY, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; // LOG_SCHEDULE_GRANULARITY template <typename ParamList> struct Ranges<ParamList, LOG_SCHEDULE_GRANULARITY> { enum { MIN = util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, MAX = Ranges<ParamList, LOG_THREADS>::MAX + Ranges<ParamList, LOG_LOAD_VEC_SIZE>::MAX + Ranges<ParamList, LOG_LOADS_PER_TILE>::MAX }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, true, // CHECK_ALIGNMENT 0, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, util::Access<ParamList, LOG_SCHEDULE_GRANULARITY>::VALUE> > // LOG_SCHEDULE_GRANULARITY struct 
KernelPolicy : BaseKernelPolicy { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0)), }; static std::string TypeString() { char buffer[32]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::upsweep::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Spine Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct SpineTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, false, // CHECK_ALIGNMENT 1, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, (util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE) > > // LOG_SCHEDULE_GRANULARITY struct KernelPolicy : BaseKernelPolicy { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, SizeT, ReductionOp, IdentityOp); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), // ptxas dies on this special case 
INVALID_SPECIAL = (TUNE_ARCH < 200) && (sizeof(T) > 4) && (BaseKernelPolicy::LOG_TILE_ELEMENTS > 9), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0) && (INVALID_SPECIAL == 0)), }; static std::string TypeString() { char buffer[4096]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::spine::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Downsweep Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct DownsweepTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, LOG_SCHEDULE_GRANULARITY, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; // LOG_SCHEDULE_GRANULARITY template <typename ParamList> struct Ranges<ParamList, LOG_SCHEDULE_GRANULARITY> { enum { MIN = util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, MAX = Ranges<ParamList, LOG_THREADS>::MAX + Ranges<ParamList, LOG_LOAD_VEC_SIZE>::MAX + Ranges<ParamList, LOG_LOADS_PER_TILE>::MAX }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, true, // CHECK_ALIGNMENT 0, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, util::Access<ParamList, LOG_SCHEDULE_GRANULARITY>::VALUE> > // LOG_SCHEDULE_GRANULARITY struct KernelPolicy : BaseKernelPolicy { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), // ptxas dies on this special case INVALID_SPECIAL = (TUNE_ARCH < 200) && (sizeof(T) > 4) && 
(BaseKernelPolicy::LOG_TILE_ELEMENTS > 9), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0) && (INVALID_SPECIAL == 0)), }; static std::string TypeString() { char buffer[32]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::downsweep::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Pass Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct PassTuningRanges { enum Param { BEGIN, OVERSUBSCRIBED_GRID_SIZE, END, // Parameters below here are currently not part of the tuning sweep READ_MODIFIER, WRITE_MODIFIER, UNIFORM_SMEM_ALLOCATION, UNIFORM_GRID_SIZE }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // READ_MODIFIER template <typename ParamList> struct Ranges<ParamList, READ_MODIFIER> { enum { MIN = util::io::ld::NONE, MAX = util::io::ld::LIMIT - 1, }; }; // WRITE_MODIFIER template <typename ParamList> struct Ranges<ParamList, WRITE_MODIFIER> { enum { MIN = util::io::st::NONE, MAX = util::io::st::LIMIT - 1, }; }; // UNIFORM_SMEM_ALLOCATION template <typename ParamList> struct Ranges<ParamList, UNIFORM_SMEM_ALLOCATION> { enum { MIN = 0, MAX = 1 }; }; // UNIFORM_GRID_SIZE template <typename ParamList> struct Ranges<ParamList, UNIFORM_GRID_SIZE> { enum { MIN = 0, MAX = 1 }; }; // OVERSUBSCRIBED_GRID_SIZE template <typename ParamList> struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> { enum { MIN = 0, MAX = 1 }; }; // Return pass details configuration from param tuple template <typename ParamList> static PassDetails Details() { return PassDetails( 0, // UNIFORM_SMEM_ALLOCATION 0, // UNIFORM_GRID_SIZE util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE); } }; /****************************************************************************** * Generators ******************************************************************************/ /** * Kernel-policy generator (callback) */ template < typename ProblemType, typename TuningRanges, typename ConfigMap> struct KernelGen { typedef typename ConfigMap::mapped_type GrainMap; // map (scheduling-granularity -> LaunchDetails) typedef typename GrainMap::value_type GrainLaunchDetails; // tuple (scheduling-granularity, LaunchDetails) typedef typename ConfigMap::value_type ConfigMapPair; // tuple (string, GrainMap) typedef typename GrainMap::mapped_type LaunchDetails; // tuple (KernelDetails, kernel function ptr) // Constructed map of kernel tuning configurations ConfigMap *config_map; // Constructor KernelGen(ConfigMap *config_map) : config_map(config_map) {} // Interface void Generate() { util::ParamListSweep< TuningRanges::BEGIN + 1, TuningRanges::END, TuningRanges::template Ranges>::template Invoke<util::EmptyTuple>(*this); } // Callback template <typename ParamList> void Invoke() { typedef typename TuningRanges::template KernelPolicy< ProblemType, ParamList> 
KernelPolicy; // Type string for this config family std::string typestring = KernelPolicy::TypeString(); // Create pairing between kernel-details and kernel-pointer LaunchDetails launch_details( KernelDetails( KernelPolicy::THREADS, KernelPolicy::TILE_ELEMENTS), KernelPolicy::Kernel()); // Create pairing between granularity and launch-details GrainLaunchDetails grain_launch_details( KernelPolicy::LOG_SCHEDULE_GRANULARITY, launch_details); // Check to see if we've started a grain list if (config_map->find(typestring) == config_map->end()) { // Not found. Insert grain pair into new grain map, insert grain map into config map GrainMap grain_map; grain_map.insert(grain_launch_details); config_map->insert(ConfigMapPair(typestring, grain_map)); } else { // Add this scheduling granularity to the config list config_map->find(typestring)->second.insert(grain_launch_details); } } }; /** * Pass policy-generator (callback) */ template < typename TuningRanges, typename ConfigList> struct PassGen { // Constructed map of kernel tuning configurations ConfigList *config_list; // Constructor PassGen(ConfigList *config_list) : config_list(config_list) {} // Interface void Generate() { util::ParamListSweep< TuningRanges::BEGIN + 1, TuningRanges::END, TuningRanges::template Ranges>::template Invoke<util::EmptyTuple>(*this); } // Callback template <typename ParamList> void Invoke() { PassDetails pass_details = TuningRanges::template Details<ParamList>(); config_list->push_back(pass_details); } }; template <typename ProblemType> struct Enactor : public util::EnactorBase { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; // Spine problem type typedef scan::ProblemType< typename ProblemType::T, typename ProblemType::SizeT, typename ProblemType::ReductionOp, typename ProblemType::IdentityOp, true, // EXCLUSIVE ProblemType::COMMUTATIVE> SpineProblemType; // Kernel pointer types typedef void (*UpsweepKernelPtr)(T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); typedef void (*SpineKernelPtr)(T*, T*, SizeT, ReductionOp, IdentityOp); typedef void (*DownsweepKernelPtr)(T*, T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); typedef std::pair<KernelDetails, UpsweepKernelPtr> UpsweepLaunchDetails; typedef std::pair<KernelDetails, SpineKernelPtr> SpineLaunchDetails; typedef std::pair<KernelDetails, DownsweepKernelPtr> DownsweepLaunchDetails; // Config grain-map types (LOG_GRANULARITY -> kernel pointer) typedef std::map<int, UpsweepLaunchDetails> UpsweepGrainMap; typedef std::map<int, SpineLaunchDetails> SpineGrainMap; typedef std::map<int, DownsweepLaunchDetails> DownsweepGrainMap; // Config map types (tune-string -> grain map) typedef std::map<std::string, UpsweepGrainMap> UpsweepMap; typedef std::map<std::string, SpineGrainMap> SpineMap; typedef std::map<std::string, DownsweepGrainMap> DownsweepMap; // Pass config list typedef std::vector<PassDetails> PassConfigList; // Configuration maps UpsweepMap upsweep_configs; SpineMap spine_configs; DownsweepMap downsweep_configs; PassConfigList pass_configs; // Temporary device storage needed for reducing partials produced // by separate CTAs util::Spine spine; T *d_dest; T *d_src; T *h_data; T *h_reference; SizeT num_elements; ReductionOp reduction_op; IdentityOp identity_op; /** * Constructor */ Enactor( ReductionOp reduction_op, IdentityOp identity_op) : d_dest(NULL), d_src(NULL), h_data(NULL), 
h_reference(NULL), reduction_op(reduction_op), identity_op(identity_op) { // Pre-allocate our spine if (spine.Setup<long long>(SmCount() * 8 * 8)) exit(1); // Generates all kernel config maps KernelGen<ProblemType, UpsweepTuningRanges, UpsweepMap> upsweep_gen(&upsweep_configs); KernelGen<SpineProblemType, SpineTuningRanges, SpineMap> spine_gen(&spine_configs); KernelGen<ProblemType, DownsweepTuningRanges, DownsweepMap> downsweep_gen(&downsweep_configs); PassGen<PassTuningRanges, PassConfigList> pass_gen(&pass_configs); upsweep_gen.Generate(); spine_gen.Generate(); downsweep_gen.Generate(); pass_gen.Generate(); } /** * */ hipError_t RunSample( int log_schedule_granularity, UpsweepLaunchDetails upsweep_details, SpineLaunchDetails spine_details, DownsweepLaunchDetails downsweep_details) { const bool OVERSUBSCRIBED_GRID_SIZE = true; const bool UNIFORM_SMEM_ALLOCATION = false; const bool UNIFORM_GRID_SIZE = false; hipError_t retval = hipSuccess; do { // Max CTA occupancy for the actual target device int max_cta_occupancy; if (retval = MaxCtaOccupancy( max_cta_occupancy, upsweep_details.second, upsweep_details.first.threads, downsweep_details.second, downsweep_details.first.threads)) break; // Compute sweep grid size int sweep_grid_size = GridSize( OVERSUBSCRIBED_GRID_SIZE, 1 << log_schedule_granularity, max_cta_occupancy, num_elements, g_max_ctas); // Use single-CTA kernel instead of multi-pass if problem is small enough if (num_elements <= spine_details.first.tile_elements * 3) { sweep_grid_size = 1; } // Compute spine elements: one element per CTA, rounded // up to nearest spine tile size int spine_elements = ((sweep_grid_size + spine_details.first.tile_elements - 1) / spine_details.first.tile_elements) * spine_details.first.tile_elements; // Obtain a CTA work distribution util::CtaWorkDistribution<SizeT> work; work.Init(num_elements, sweep_grid_size, log_schedule_granularity); if (ENACTOR_DEBUG) { printf("Work: "); work.Print(); } if (work.grid_size == 1) { if (ENACTOR_DEBUG) { printf("Sweep<<<%d,%d,%d>>>\n", 1, spine_details.first.threads, 0); } // Single-CTA, single-grid operation hipLaunchKernelGGL(spine_details.second, dim3(1), dim3(spine_details.first.threads), 0, 0, d_src, d_dest, work.num_elements, reduction_op, identity_op); if (ENACTOR_DEBUG && (retval = util::B40CPerror(hipDeviceSynchronize(), "Enactor SingleKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; } else { // Make sure our spine is big enough if (retval = spine.Setup<T>(spine_elements)) break; int dynamic_smem[3] = {0, 0, 0}; int grid_size[3] = {work.grid_size, 1, work.grid_size}; // Tuning option: make sure all kernels have the same overall smem allocation if (UNIFORM_SMEM_ALLOCATION) if (retval = PadUniformSmem( dynamic_smem, upsweep_details.second, spine_details.second, downsweep_details.second)) break; // Tuning option: make sure that all kernels launch the same number of CTAs) if (UNIFORM_GRID_SIZE) grid_size[1] = grid_size[0]; if (ENACTOR_DEBUG) { printf("Upsweep<<<%d,%d,%d>>> Spine<<<%d,%d,%d>>> Downsweep<<<%d,%d,%d>>>\n", grid_size[0], upsweep_details.first.threads, dynamic_smem[0], grid_size[1], spine_details.first.threads, dynamic_smem[1], grid_size[2], downsweep_details.first.threads, dynamic_smem[2]); } // Upsweep into spine hipLaunchKernelGGL(upsweep_details.second, dim3(grid_size[0]), dim3(upsweep_details.first.threads), dynamic_smem[0], 0, d_src, (T*) spine(), reduction_op, identity_op, work); if (ENACTOR_DEBUG && (retval = util::B40CPerror(hipDeviceSynchronize(), "Enactor UpsweepKernel
failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; // Spine scan spine_detailshipLaunchKernelGGL((.second), dim3(grid_size[1]), dim3(spine_details.first.threads), dynamic_smem[1], 0, (T*) spine(), (T*) spine(), spine_elements, reduction_op, identity_op); if (ENACTOR_DEBUG && (retval = util::B40CPerror(hipDeviceSynchronize(), "Enactor SpineKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; // Downsweep from spine downsweep_detailshipLaunchKernelGGL((.second), dim3(grid_size[2]), dim3(downsweep_details.first.threads), dynamic_smem[2], 0, d_src, d_dest, (T*) spine(), reduction_op, identity_op, work); if (ENACTOR_DEBUG && (retval = util::B40CPerror(hipDeviceSynchronize(), "Enactor DownsweepKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; } } while (0); return retval; } /** * */ void TimeSample( int log_schedule_granularity, UpsweepLaunchDetails upsweep_details, SpineLaunchDetails spine_details, DownsweepLaunchDetails downsweep_details) { // Check if valid for dispatch if (!upsweep_details.second || !spine_details.second || !downsweep_details.second) { return; } // Invoke kernels (warmup) ENACTOR_DEBUG = g_verbose; if (RunSample( log_schedule_granularity, upsweep_details, spine_details, downsweep_details)) { exit(1); } ENACTOR_DEBUG = false; // Perform the timed number of iterations GpuTimer timer; double elapsed = 0; for (int i = 0; i < g_iterations; i++) { // Start cuda timing record timer.Start(); // Invoke kernels if (RunSample( log_schedule_granularity, upsweep_details, spine_details, downsweep_details)) { exit(1); } // End cuda timing record timer.Stop(); elapsed += timer.ElapsedMillis(); // Flushes any stdio from the GPU if (util::B40CPerror(hipDeviceSynchronize(), "TimedCopy hipDeviceSynchronize failed: ", __FILE__, __LINE__)) { exit(1); } } // Display timing information double avg_runtime = elapsed / g_iterations; double throughput = 0.0; if (avg_runtime > 0.0) throughput = ((double) num_elements) / avg_runtime / 1000.0 / 1000.0; printf(", %f, %f, %f, ", avg_runtime, throughput, throughput * sizeof(T) * 3); fflush(stdout); if (g_verify) { // Copy out data if (util::B40CPerror(hipMemcpy( h_data, d_dest, sizeof(T) * num_elements, hipMemcpyDeviceToHost), "TimedScan hipMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1); // Verify solution CompareResults( h_data, h_reference, num_elements, true); } } /** * Iterates over configuration space */ void IterateConfigSpace() { int config_id = 0; // Iterate upsweep configs for (typename UpsweepMap::iterator upsweep_config_itr = upsweep_configs.begin(); upsweep_config_itr != upsweep_configs.end(); upsweep_config_itr++) { std::string upsweep_string = upsweep_config_itr->first; // Iterate downsweep configs for (typename DownsweepMap::iterator downsweep_config_itr = downsweep_configs.begin(); downsweep_config_itr != downsweep_configs.end(); downsweep_config_itr++) { std::string downsweep_string = downsweep_config_itr->first; typename UpsweepGrainMap::iterator upsweep_grain_itr = upsweep_config_itr->second.begin(); typename DownsweepGrainMap::iterator downsweep_grain_itr = downsweep_config_itr->second.begin(); while (true) { if ((upsweep_grain_itr == upsweep_config_itr->second.end()) || (downsweep_grain_itr == downsweep_config_itr->second.end())) { // Could not match grain printf("Could not match upsweep(%s) with downsweep(%s)\n", upsweep_string.c_str(), downsweep_string.c_str()); exit(1); } else if (upsweep_grain_itr->first == downsweep_grain_itr->first) { // Matched grain std::string downsweep_string = 
downsweep_config_itr->first; // Iterate spine configs for (typename SpineMap::iterator spine_config_itr = spine_configs.begin(); spine_config_itr != spine_configs.end(); spine_config_itr++) { std::string spine_string = spine_config_itr->first; // Iterate pass configs for (typename PassConfigList::iterator pass_config_itr = pass_configs.begin(); pass_config_itr != pass_configs.end(); pass_config_itr++) { std::string pass_string = pass_config_itr->TypeString(); printf("%d, %s, %d, %s, %s, %s", config_id, pass_string.c_str(), upsweep_grain_itr->first, // schedule grain upsweep_string.c_str(), spine_string.c_str(), downsweep_string.c_str()); config_id++; TimeSample( upsweep_grain_itr->first, upsweep_grain_itr->second, spine_config_itr->second.begin()->second, downsweep_grain_itr->second); printf("\n"); fflush(stdout); } } break; } else if (upsweep_grain_itr->first < downsweep_grain_itr->first) { upsweep_grain_itr++; } else { downsweep_grain_itr++; } } } } } /** * Creates an example problem and then dispatches the iterations * to the GPU for the given number of iterations, displaying runtime information. */ void Test(SizeT num_elements) { this->num_elements = num_elements; if (util::B40CPerror(hipMalloc((void**) &d_src, sizeof(T) * num_elements), "TimedScan hipMalloc d_src failed: ", __FILE__, __LINE__)) exit(1); if (util::B40CPerror(hipMalloc((void**) &d_dest, sizeof(T) * num_elements), "TimedScan hipMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1); if ((h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) { fprintf(stderr, "Host malloc of problem data failed\n"); exit(1); } if ((h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) { fprintf(stderr, "Host malloc of problem data failed\n"); exit(1); } h_reference[0] = identity_op(); for (SizeT i = 0; i < num_elements; ++i) { // util::RandomBits<T>(h_data[i], 0); h_data[i] = i; h_reference[i] = (i == 0) ? identity_op() : reduction_op(h_reference[i - 1], h_data[i - 1]); } // Move a fresh copy of the problem into device storage if (util::B40CPerror(hipMemcpy(d_src, h_data, sizeof(T) * num_elements, hipMemcpyHostToDevice), "TimedScan hipMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1); // Iterate configuration space IterateConfigSpace(); // Free allocated memory if (d_src) hipFree(d_src); if (d_dest) hipFree(d_dest); // Free our allocated host memory if (h_data) free(h_data); if (h_reference) free(h_reference); } }; /****************************************************************************** * Test ******************************************************************************/ /** * Creates an example problem and then dispatches the iterations * to the GPU for the given number of iterations, displaying runtime information. 
*/ template< typename T, typename SizeT, typename ReductionOp, typename IdentityOp> void Test( SizeT num_elements, ReductionOp reduction_op, IdentityOp identity_op) { // Establish the problem types typedef scan::ProblemType< T, SizeT, ReductionOp, IdentityOp, true, // EXCLUSIVE, true> // COMMUTATIVE ProblemType; // Create enactor Enactor<ProblemType> enactor( reduction_op, identity_op); // Run test enactor.Test(num_elements); } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); DeviceInit(args); // Seed random number generator srand(0); // presently deterministic // Use 32-bit integer for array indexing typedef int SizeT; SizeT num_elements = 1024; // Parse command line arguments if (args.CheckCmdLineFlag("help")) { Usage(); return 0; } args.GetCmdLineArgument("i", g_iterations); args.GetCmdLineArgument("n", num_elements); args.GetCmdLineArgument("max-ctas", g_max_ctas); g_verify = args.CheckCmdLineFlag("verify"); g_verbose = args.CheckCmdLineFlag("v"); util::CudaProperties cuda_props; printf("Test Scan: %d iterations, %lu elements", g_iterations, (unsigned long) num_elements); printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n", cuda_props.device_sm_version, cuda_props.kernel_ptx_version); printf("" "TuneID, " "UNIFORM_SMEM_ALLOCATION, " "UNIFORM_GRID_SIZE, " "OVERSUBSCRIBED_GRID_SIZE, " "SCHEDULING_GRANULARITY, " "UPSWEEP_LOG_THREADS, " "UPSWEEP_LOG_LOAD_VEC_SIZE, " "UPSWEEP_LOG_LOADS_PER_TILE, " "SPINE_LOG_THREADS, " "SPINE_LOG_LOAD_VEC_SIZE, " "SPINE_LOG_LOADS_PER_TILE, " "DOWNSWEEP_LOG_THREADS, " "DOWNSWEEP_LOG_LOAD_VEC_SIZE, " "DOWNSWEEP_LOG_LOADS_PER_TILE, " "elapsed time (ms), " "throughput (10^9 items/s), " "bandwidth (10^9 B/s)"); if (g_verify) printf(", Correctness"); printf("\n"); // Execute test(s) #if (TUNE_SIZE == 0) || (TUNE_SIZE == 1) { typedef unsigned char T; Sum<T> binary_op; Test<T>(num_elements * 4, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 2) { typedef unsigned short T; Sum<T> binary_op; Test<T>(num_elements * 2, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 4) { typedef unsigned int T; Sum<T> binary_op; Test<T>(num_elements, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 8) { typedef unsigned long long T; Sum<T> binary_op; Test<T>(num_elements / 2, binary_op, binary_op); } #endif return 0; }
93d641e7f21badc234b54a68c851d0124f34ae12.cu
/****************************************************************************** * Copyright (c) 2010-2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Tuning tool for establishing optimal scan granularity configuration types ******************************************************************************/ #include <stdio.h> #include <map> #include <vector> #include <b40c/util/arch_dispatch.cuh> #include <b40c/util/cuda_properties.cuh> #include <b40c/util/numeric_traits.cuh> #include <b40c/util/parameter_generation.cuh> #include <b40c/util/enactor_base.cuh> #include <b40c/util/spine.cuh> #include <b40c/scan/problem_type.cuh> #include <b40c/scan/policy.cuh> // Test utils #include "b40c_test_util.h" using namespace b40c; /****************************************************************************** * Defines, constants, globals, and utility types ******************************************************************************/ #ifndef TUNE_ARCH #define TUNE_ARCH (200) #endif #ifndef TUNE_SIZE #define TUNE_SIZE (4) #endif bool g_verbose; int g_max_ctas = 0; int g_iterations = 0; bool g_verify; int g_policy_id = 0; struct KernelDetails { int threads; int tile_elements; KernelDetails( int threads, int tile_elements) : threads(threads), tile_elements(tile_elements) {} }; struct PassDetails { int uniform_smem_allocation; int uniform_grid_size; int over_subscribed; // Factory initializer PassDetails ( int uniform_smem_allocation, int uniform_grid_size, int over_subscribed) : uniform_smem_allocation(uniform_smem_allocation), uniform_grid_size(uniform_grid_size), over_subscribed(over_subscribed) {} // CSV string format std::string TypeString() { char buffer[1024]; sprintf(buffer, "%s, %s, %s", uniform_smem_allocation ? "true" : "false", uniform_grid_size ? "true" : "false", over_subscribed ? 
"true" : "false"); return buffer; } }; /****************************************************************************** * Test wrappers for binary, associative operations ******************************************************************************/ template <typename T> struct Sum { // Binary reduction __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) { return a + b; } // Identity __host__ __device__ __forceinline__ T operator()() { return 0; } static const bool IS_COMMUTATIVE = true; }; template <typename T> struct Max { // Binary reduction __host__ __device__ __forceinline__ T Op(const T &a, const T &b) { return (a > b) ? a : b; } // Identity __host__ __device__ __forceinline__ T operator()() { return 0; } static const bool IS_COMMUTATIVE = true; }; /****************************************************************************** * Utility routines ******************************************************************************/ /** * Displays the commandline usage for this tool */ void Usage() { printf("\ntune_scan [--device=<device index>] [--v] [--i=<num-iterations>] " "[--max-ctas=<max-thread-blocks>] [--n=<num-words>] [--verify]\n"); printf("\n"); printf("\t--v\tDisplays verbose configuration to the console.\n"); printf("\n"); printf("\t--verify\tChecks the result.\n"); printf("\n"); printf("\t--i\tPerforms the operation <num-iterations> times\n"); printf("\t\t\ton the device. Default = 1\n"); printf("\n"); printf("\t--n\tThe number of 32-bit words to comprise the sample problem\n"); printf("\n"); printf("\t--max-ctas\tThe number of CTAs to launch\n"); printf("\n"); } /****************************************************************************** * Upsweep Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct UpsweepTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, LOG_SCHEDULE_GRANULARITY, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; // LOG_SCHEDULE_GRANULARITY template <typename ParamList> struct Ranges<ParamList, LOG_SCHEDULE_GRANULARITY> { enum { MIN = util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, MAX = Ranges<ParamList, LOG_THREADS>::MAX + Ranges<ParamList, LOG_LOAD_VEC_SIZE>::MAX + Ranges<ParamList, LOG_LOADS_PER_TILE>::MAX }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, true, // CHECK_ALIGNMENT 0, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, util::Access<ParamList, LOG_SCHEDULE_GRANULARITY>::VALUE> > // LOG_SCHEDULE_GRANULARITY struct KernelPolicy : BaseKernelPolicy { typedef 
typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0)), }; static std::string TypeString() { char buffer[32]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::upsweep::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Spine Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct SpineTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, false, // CHECK_ALIGNMENT 1, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, (util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE) > > // LOG_SCHEDULE_GRANULARITY struct KernelPolicy : BaseKernelPolicy { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, SizeT, ReductionOp, IdentityOp); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), // ptxas dies on this special case INVALID_SPECIAL = (TUNE_ARCH < 200) && (sizeof(T) > 4) 
&& (BaseKernelPolicy::LOG_TILE_ELEMENTS > 9), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0) && (INVALID_SPECIAL == 0)), }; static std::string TypeString() { char buffer[4096]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::spine::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Downsweep Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct DownsweepTuningRanges { /** * Tuning params */ enum Param { BEGIN, LOG_THREADS, LOG_LOAD_VEC_SIZE, LOG_LOADS_PER_TILE, LOG_SCHEDULE_GRANULARITY, END, }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // LOG_THREADS template <typename ParamList> struct Ranges<ParamList, LOG_THREADS> { enum { MIN = 5, // 32 MAX = 10 // 1024 }; }; // LOG_LOAD_VEC_SIZE template <typename ParamList> struct Ranges<ParamList, LOG_LOAD_VEC_SIZE> { enum { MIN = 0, MAX = 2 }; }; // LOG_LOADS_PER_TILE template <typename ParamList> struct Ranges<ParamList, LOG_LOADS_PER_TILE> { enum { MIN = 0, MAX = 2 }; }; // LOG_SCHEDULE_GRANULARITY template <typename ParamList> struct Ranges<ParamList, LOG_SCHEDULE_GRANULARITY> { enum { MIN = util::Access<ParamList, LOG_THREADS>::VALUE + util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE + util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, MAX = Ranges<ParamList, LOG_THREADS>::MAX + Ranges<ParamList, LOG_LOAD_VEC_SIZE>::MAX + Ranges<ParamList, LOG_LOADS_PER_TILE>::MAX }; }; /** * Policy */ template < typename ProblemType, typename ParamList, typename BaseKernelPolicy = scan::KernelPolicy < ProblemType, TUNE_ARCH, true, // CHECK_ALIGNMENT 0, // MIN_CTA_OCCUPANCY, util::Access<ParamList, LOG_THREADS>::VALUE, // LOG_THREADS, util::Access<ParamList, LOG_LOAD_VEC_SIZE>::VALUE, // LOG_LOAD_VEC_SIZE, util::Access<ParamList, LOG_LOADS_PER_TILE>::VALUE, // LOG_LOADS_PER_TILE, B40C_LOG_WARP_THREADS(TUNE_ARCH), // LOG_RAKING_THREADS, util::io::ld::NONE, // READ_MODIFIER, util::io::st::NONE, // WRITE_MODIFIER, util::Access<ParamList, LOG_SCHEDULE_GRANULARITY>::VALUE> > // LOG_SCHEDULE_GRANULARITY struct KernelPolicy : BaseKernelPolicy { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; typedef void (*KernelPtr)(T*, T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); // Check if this configuration is worth compiling enum { REG_MULTIPLIER = (sizeof(T) + 4 - 1) / 4, REGS_ESTIMATE = (REG_MULTIPLIER * KernelPolicy::TILE_ELEMENTS_PER_THREAD) + 2, EST_REGS_OCCUPANCY = B40C_SM_REGISTERS(TUNE_ARCH) / (REGS_ESTIMATE * KernelPolicy::THREADS), // ptxas dies on this special case INVALID_SPECIAL = (TUNE_ARCH < 200) && (sizeof(T) > 4) && (BaseKernelPolicy::LOG_TILE_ELEMENTS > 9), VALID_COMPILE = ((BaseKernelPolicy::VALID > 0) 
&& ((TUNE_ARCH >= 200) || (BaseKernelPolicy::READ_MODIFIER == util::io::ld::NONE)) && ((TUNE_ARCH >= 200) || (BaseKernelPolicy::WRITE_MODIFIER == util::io::st::NONE)) && (BaseKernelPolicy::LOG_THREADS <= B40C_LOG_CTA_THREADS(TUNE_ARCH)) && (EST_REGS_OCCUPANCY > 0) && (INVALID_SPECIAL == 0)), }; static std::string TypeString() { char buffer[32]; sprintf(buffer, "%d, %d, %d", KernelPolicy::LOG_THREADS, KernelPolicy::LOG_LOAD_VEC_SIZE, KernelPolicy::LOG_LOADS_PER_TILE); return buffer; } template <int VALID, int DUMMY = 0> struct GenKernel { static KernelPtr Kernel() { return scan::downsweep::Kernel<KernelPolicy>; } }; template <int DUMMY> struct GenKernel<0, DUMMY> { static KernelPtr Kernel() { return NULL; } }; static KernelPtr Kernel() { return GenKernel<VALID_COMPILE>::Kernel(); } }; }; /****************************************************************************** * Pass Tuning Parameter Enumerations and Ranges ******************************************************************************/ struct PassTuningRanges { enum Param { BEGIN, OVERSUBSCRIBED_GRID_SIZE, END, // Parameters below here are currently not part of the tuning sweep READ_MODIFIER, WRITE_MODIFIER, UNIFORM_SMEM_ALLOCATION, UNIFORM_GRID_SIZE }; /** * Ranges for the tuning params */ template <typename ParamList, int PARAM> struct Ranges; // READ_MODIFIER template <typename ParamList> struct Ranges<ParamList, READ_MODIFIER> { enum { MIN = util::io::ld::NONE, MAX = util::io::ld::LIMIT - 1, }; }; // WRITE_MODIFIER template <typename ParamList> struct Ranges<ParamList, WRITE_MODIFIER> { enum { MIN = util::io::st::NONE, MAX = util::io::st::LIMIT - 1, }; }; // UNIFORM_SMEM_ALLOCATION template <typename ParamList> struct Ranges<ParamList, UNIFORM_SMEM_ALLOCATION> { enum { MIN = 0, MAX = 1 }; }; // UNIFORM_GRID_SIZE template <typename ParamList> struct Ranges<ParamList, UNIFORM_GRID_SIZE> { enum { MIN = 0, MAX = 1 }; }; // OVERSUBSCRIBED_GRID_SIZE template <typename ParamList> struct Ranges<ParamList, OVERSUBSCRIBED_GRID_SIZE> { enum { MIN = 0, MAX = 1 }; }; // Return pass details configuration from param tuple template <typename ParamList> static PassDetails Details() { return PassDetails( 0, // UNIFORM_SMEM_ALLOCATION 0, // UNIFORM_GRID_SIZE util::Access<ParamList, OVERSUBSCRIBED_GRID_SIZE>::VALUE); } }; /****************************************************************************** * Generators ******************************************************************************/ /** * Kernel-policy generator (callback) */ template < typename ProblemType, typename TuningRanges, typename ConfigMap> struct KernelGen { typedef typename ConfigMap::mapped_type GrainMap; // map (scheduling-granularity -> LaunchDetails) typedef typename GrainMap::value_type GrainLaunchDetails; // tuple (scheduling-granularity, LaunchDetails) typedef typename ConfigMap::value_type ConfigMapPair; // tuple (string, GrainMap) typedef typename GrainMap::mapped_type LaunchDetails; // tuple (KernelDetails, kernel function ptr) // Constructed map of kernel tuning configurations ConfigMap *config_map; // Constructor KernelGen(ConfigMap *config_map) : config_map(config_map) {} // Interface void Generate() { util::ParamListSweep< TuningRanges::BEGIN + 1, TuningRanges::END, TuningRanges::template Ranges>::template Invoke<util::EmptyTuple>(*this); } // Callback template <typename ParamList> void Invoke() { typedef typename TuningRanges::template KernelPolicy< ProblemType, ParamList> KernelPolicy; // Type string for this config family std::string typestring = 
KernelPolicy::TypeString(); // Create pairing between kernel-details and kernel-pointer LaunchDetails launch_details( KernelDetails( KernelPolicy::THREADS, KernelPolicy::TILE_ELEMENTS), KernelPolicy::Kernel()); // Create pairing between granularity and launch-details GrainLaunchDetails grain_launch_details( KernelPolicy::LOG_SCHEDULE_GRANULARITY, launch_details); // Check to see if we've started a grain list if (config_map->find(typestring) == config_map->end()) { // Not found. Insert grain pair into new grain map, insert grain map into config map GrainMap grain_map; grain_map.insert(grain_launch_details); config_map->insert(ConfigMapPair(typestring, grain_map)); } else { // Add this scheduling granularity to the config list config_map->find(typestring)->second.insert(grain_launch_details); } } }; /** * Pass policy-generator (callback) */ template < typename TuningRanges, typename ConfigList> struct PassGen { // Constructed map of kernel tuning configurations ConfigList *config_list; // Constructor PassGen(ConfigList *config_list) : config_list(config_list) {} // Interface void Generate() { util::ParamListSweep< TuningRanges::BEGIN + 1, TuningRanges::END, TuningRanges::template Ranges>::template Invoke<util::EmptyTuple>(*this); } // Callback template <typename ParamList> void Invoke() { PassDetails pass_details = TuningRanges::template Details<ParamList>(); config_list->push_back(pass_details); } }; template <typename ProblemType> struct Enactor : public util::EnactorBase { typedef typename ProblemType::T T; typedef typename ProblemType::SizeT SizeT; typedef typename ProblemType::ReductionOp ReductionOp; typedef typename ProblemType::IdentityOp IdentityOp; // Spine problem type typedef scan::ProblemType< typename ProblemType::T, typename ProblemType::SizeT, typename ProblemType::ReductionOp, typename ProblemType::IdentityOp, true, // EXCLUSIVE ProblemType::COMMUTATIVE> SpineProblemType; // Kernel pointer types typedef void (*UpsweepKernelPtr)(T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); typedef void (*SpineKernelPtr)(T*, T*, SizeT, ReductionOp, IdentityOp); typedef void (*DownsweepKernelPtr)(T*, T*, T*, ReductionOp, IdentityOp, util::CtaWorkDistribution<SizeT>); typedef std::pair<KernelDetails, UpsweepKernelPtr> UpsweepLaunchDetails; typedef std::pair<KernelDetails, SpineKernelPtr> SpineLaunchDetails; typedef std::pair<KernelDetails, DownsweepKernelPtr> DownsweepLaunchDetails; // Config grain-map types (LOG_GRANULARITY -> kernel pointer) typedef std::map<int, UpsweepLaunchDetails> UpsweepGrainMap; typedef std::map<int, SpineLaunchDetails> SpineGrainMap; typedef std::map<int, DownsweepLaunchDetails> DownsweepGrainMap; // Config map types (tune-string -> grain map) typedef std::map<std::string, UpsweepGrainMap> UpsweepMap; typedef std::map<std::string, SpineGrainMap> SpineMap; typedef std::map<std::string, DownsweepGrainMap> DownsweepMap; // Pass config list typedef std::vector<PassDetails> PassConfigList; // Configuration maps UpsweepMap upsweep_configs; SpineMap spine_configs; DownsweepMap downsweep_configs; PassConfigList pass_configs; // Temporary device storage needed for reducing partials produced // by separate CTAs util::Spine spine; T *d_dest; T *d_src; T *h_data; T *h_reference; SizeT num_elements; ReductionOp reduction_op; IdentityOp identity_op; /** * Constructor */ Enactor( ReductionOp reduction_op, IdentityOp identity_op) : d_dest(NULL), d_src(NULL), h_data(NULL), h_reference(NULL), reduction_op(reduction_op), identity_op(identity_op) { // Pre-allocate 
our spine if (spine.Setup<long long>(SmCount() * 8 * 8)) exit(1); // Generates all kernel config maps KernelGen<ProblemType, UpsweepTuningRanges, UpsweepMap> upsweep_gen(&upsweep_configs); KernelGen<SpineProblemType, SpineTuningRanges, SpineMap> spine_gen(&spine_configs); KernelGen<ProblemType, DownsweepTuningRanges, DownsweepMap> downsweep_gen(&downsweep_configs); PassGen<PassTuningRanges, PassConfigList> pass_gen(&pass_configs); upsweep_gen.Generate(); spine_gen.Generate(); downsweep_gen.Generate(); pass_gen.Generate(); } /** * */ cudaError_t RunSample( int log_schedule_granularity, UpsweepLaunchDetails upsweep_details, SpineLaunchDetails spine_details, DownsweepLaunchDetails downsweep_details) { const bool OVERSUBSCRIBED_GRID_SIZE = true; const bool UNIFORM_SMEM_ALLOCATION = false; const bool UNIFORM_GRID_SIZE = false; cudaError_t retval = cudaSuccess; do { // Max CTA occupancy for the actual target device int max_cta_occupancy; if (retval = MaxCtaOccupancy( max_cta_occupancy, upsweep_details.second, upsweep_details.first.threads, downsweep_details.second, downsweep_details.first.threads)) break; // Compute sweep grid size int sweep_grid_size = GridSize( OVERSUBSCRIBED_GRID_SIZE, 1 << log_schedule_granularity, max_cta_occupancy, num_elements, g_max_ctas); // Use single-CTA kernel instead of multi-pass if problem is small enough if (num_elements <= spine_details.first.tile_elements * 3) { sweep_grid_size = 1; } // Compute spine elements: one element per CTA, rounded // up to nearest spine tile size int spine_elements = ((sweep_grid_size + spine_details.first.tile_elements - 1) / spine_details.first.tile_elements) * spine_details.first.tile_elements; // Obtain a CTA work distribution util::CtaWorkDistribution<SizeT> work; work.Init(num_elements, sweep_grid_size, log_schedule_granularity); if (ENACTOR_DEBUG) { printf("Work: "); work.Print(); } if (work.grid_size == 1) { if (ENACTOR_DEBUG) { printf("Sweep<<<%d,%d,%d>>>\n", 1, spine_details.first.threads, 0); } // Single-CTA, single-grid operation spine_details.second<<<1, spine_details.first.threads, 0>>>( d_src, d_dest, work.num_elements, reduction_op, identity_op); if (ENACTOR_DEBUG && (retval = util::B40CPerror(cudaThreadSynchronize(), "Enactor SingleKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; } else { // Make sure our spine is big enough if (retval = spine.Setup<T>(spine_elements)) break; int dynamic_smem[3] = {0, 0, 0}; int grid_size[3] = {work.grid_size, 1, work.grid_size}; // Tuning option: make sure all kernels have the same overall smem allocation if (UNIFORM_SMEM_ALLOCATION) if (retval = PadUniformSmem( dynamic_smem, upsweep_details.second, spine_details.second, downsweep_details.second)) break; // Tuning option: make sure that all kernels launch the same number of CTAs) if (UNIFORM_GRID_SIZE) grid_size[1] = grid_size[0]; if (ENACTOR_DEBUG) { printf("Upsweep<<<%d,%d,%d>>> Spine<<<%d,%d,%d>>> Downsweep<<<%d,%d,%d>>>\n", grid_size[0], upsweep_details.first.threads, dynamic_smem[0], grid_size[1], spine_details.first.threads, dynamic_smem[1], grid_size[2], downsweep_details.first.threads, dynamic_smem[2]); } // Upsweep into spine upsweep_details.second<<<grid_size[0], upsweep_details.first.threads, dynamic_smem[0]>>>( d_src, (T*) spine(), reduction_op, identity_op, work); if (ENACTOR_DEBUG && (retval = util::B40CPerror(cudaThreadSynchronize(), "Enactor UpsweepKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; // Spine scan spine_details.second<<<grid_size[1], spine_details.first.threads, dynamic_smem[1]>>>( 
(T*) spine(), (T*) spine(), spine_elements, reduction_op, identity_op); if (ENACTOR_DEBUG && (retval = util::B40CPerror(cudaThreadSynchronize(), "Enactor SpineKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; // Downsweep from spine downsweep_details.second<<<grid_size[2], downsweep_details.first.threads, dynamic_smem[2]>>>( d_src, d_dest, (T*) spine(), reduction_op, identity_op, work); if (ENACTOR_DEBUG && (retval = util::B40CPerror(cudaThreadSynchronize(), "Enactor DownsweepKernel failed ", __FILE__, __LINE__, ENACTOR_DEBUG))) break; } } while (0); return retval; } /** * */ void TimeSample( int log_schedule_granularity, UpsweepLaunchDetails upsweep_details, SpineLaunchDetails spine_details, DownsweepLaunchDetails downsweep_details) { // Check if valid for dispatch if (!upsweep_details.second || !spine_details.second || !downsweep_details.second) { return; } // Invoke kernels (warmup) ENACTOR_DEBUG = g_verbose; if (RunSample( log_schedule_granularity, upsweep_details, spine_details, downsweep_details)) { exit(1); } ENACTOR_DEBUG = false; // Perform the timed number of iterations GpuTimer timer; double elapsed = 0; for (int i = 0; i < g_iterations; i++) { // Start cuda timing record timer.Start(); // Invoke kernels if (RunSample( log_schedule_granularity, upsweep_details, spine_details, downsweep_details)) { exit(1); } // End cuda timing record timer.Stop(); elapsed += timer.ElapsedMillis(); // Flushes any stdio from the GPU if (util::B40CPerror(cudaThreadSynchronize(), "TimedCopy cudaThreadSynchronize failed: ", __FILE__, __LINE__)) { exit(1); } } // Display timing information double avg_runtime = elapsed / g_iterations; double throughput = 0.0; if (avg_runtime > 0.0) throughput = ((double) num_elements) / avg_runtime / 1000.0 / 1000.0; printf(", %f, %f, %f, ", avg_runtime, throughput, throughput * sizeof(T) * 3); fflush(stdout); if (g_verify) { // Copy out data if (util::B40CPerror(cudaMemcpy( h_data, d_dest, sizeof(T) * num_elements, cudaMemcpyDeviceToHost), "TimedScan cudaMemcpy d_dest failed: ", __FILE__, __LINE__)) exit(1); // Verify solution CompareResults( h_data, h_reference, num_elements, true); } } /** * Iterates over configuration space */ void IterateConfigSpace() { int config_id = 0; // Iterate upsweep configs for (typename UpsweepMap::iterator upsweep_config_itr = upsweep_configs.begin(); upsweep_config_itr != upsweep_configs.end(); upsweep_config_itr++) { std::string upsweep_string = upsweep_config_itr->first; // Iterate downsweep configs for (typename DownsweepMap::iterator downsweep_config_itr = downsweep_configs.begin(); downsweep_config_itr != downsweep_configs.end(); downsweep_config_itr++) { std::string downsweep_string = downsweep_config_itr->first; typename UpsweepGrainMap::iterator upsweep_grain_itr = upsweep_config_itr->second.begin(); typename DownsweepGrainMap::iterator downsweep_grain_itr = downsweep_config_itr->second.begin(); while (true) { if ((upsweep_grain_itr == upsweep_config_itr->second.end()) || (downsweep_grain_itr == downsweep_config_itr->second.end())) { // Could not match grain printf("Could not match upsweep(%s) with downsweep(%s)\n", upsweep_string.c_str(), downsweep_string.c_str()); exit(1); } else if (upsweep_grain_itr->first == downsweep_grain_itr->first) { // Matched grain std::string downsweep_string = downsweep_config_itr->first; // Iterate spine configs for (typename SpineMap::iterator spine_config_itr = spine_configs.begin(); spine_config_itr != spine_configs.end(); spine_config_itr++) { std::string spine_string = 
spine_config_itr->first; // Iterate pass configs for (typename PassConfigList::iterator pass_config_itr = pass_configs.begin(); pass_config_itr != pass_configs.end(); pass_config_itr++) { std::string pass_string = pass_config_itr->TypeString(); printf("%d, %s, %d, %s, %s, %s", config_id, pass_string.c_str(), upsweep_grain_itr->first, // schedule grain upsweep_string.c_str(), spine_string.c_str(), downsweep_string.c_str()); config_id++; TimeSample( upsweep_grain_itr->first, upsweep_grain_itr->second, spine_config_itr->second.begin()->second, downsweep_grain_itr->second); printf("\n"); fflush(stdout); } } break; } else if (upsweep_grain_itr->first < downsweep_grain_itr->first) { upsweep_grain_itr++; } else { downsweep_grain_itr++; } } } } } /** * Creates an example problem and then dispatches the iterations * to the GPU for the given number of iterations, displaying runtime information. */ void Test(SizeT num_elements) { this->num_elements = num_elements; if (util::B40CPerror(cudaMalloc((void**) &d_src, sizeof(T) * num_elements), "TimedScan cudaMalloc d_src failed: ", __FILE__, __LINE__)) exit(1); if (util::B40CPerror(cudaMalloc((void**) &d_dest, sizeof(T) * num_elements), "TimedScan cudaMalloc d_dest failed: ", __FILE__, __LINE__)) exit(1); if ((h_data = (T*) malloc(sizeof(T) * num_elements)) == NULL) { fprintf(stderr, "Host malloc of problem data failed\n"); exit(1); } if ((h_reference = (T*) malloc(sizeof(T) * num_elements)) == NULL) { fprintf(stderr, "Host malloc of problem data failed\n"); exit(1); } h_reference[0] = identity_op(); for (SizeT i = 0; i < num_elements; ++i) { // util::RandomBits<T>(h_data[i], 0); h_data[i] = i; h_reference[i] = (i == 0) ? identity_op() : reduction_op(h_reference[i - 1], h_data[i - 1]); } // Move a fresh copy of the problem into device storage if (util::B40CPerror(cudaMemcpy(d_src, h_data, sizeof(T) * num_elements, cudaMemcpyHostToDevice), "TimedScan cudaMemcpy d_src failed: ", __FILE__, __LINE__)) exit(1); // Iterate configuration space IterateConfigSpace(); // Free allocated memory if (d_src) cudaFree(d_src); if (d_dest) cudaFree(d_dest); // Free our allocated host memory if (h_data) free(h_data); if (h_reference) free(h_reference); } }; /****************************************************************************** * Test ******************************************************************************/ /** * Creates an example problem and then dispatches the iterations * to the GPU for the given number of iterations, displaying runtime information. 
*/ template< typename T, typename SizeT, typename ReductionOp, typename IdentityOp> void Test( SizeT num_elements, ReductionOp reduction_op, IdentityOp identity_op) { // Establish the problem types typedef scan::ProblemType< T, SizeT, ReductionOp, IdentityOp, true, // EXCLUSIVE, true> // COMMUTATIVE ProblemType; // Create enactor Enactor<ProblemType> enactor( reduction_op, identity_op); // Run test enactor.Test(num_elements); } /****************************************************************************** * Main ******************************************************************************/ int main(int argc, char** argv) { CommandLineArgs args(argc, argv); DeviceInit(args); // Seed random number generator srand(0); // presently deterministic // Use 32-bit integer for array indexing typedef int SizeT; SizeT num_elements = 1024; // Parse command line arguments if (args.CheckCmdLineFlag("help")) { Usage(); return 0; } args.GetCmdLineArgument("i", g_iterations); args.GetCmdLineArgument("n", num_elements); args.GetCmdLineArgument("max-ctas", g_max_ctas); g_verify = args.CheckCmdLineFlag("verify"); g_verbose = args.CheckCmdLineFlag("v"); util::CudaProperties cuda_props; printf("Test Scan: %d iterations, %lu elements", g_iterations, (unsigned long) num_elements); printf("\nCodeGen: \t[device_sm_version: %d, kernel_ptx_version: %d]\n\n", cuda_props.device_sm_version, cuda_props.kernel_ptx_version); printf("" "TuneID, " "UNIFORM_SMEM_ALLOCATION, " "UNIFORM_GRID_SIZE, " "OVERSUBSCRIBED_GRID_SIZE, " "SCHEDULING_GRANULARITY, " "UPSWEEP_LOG_THREADS, " "UPSWEEP_LOG_LOAD_VEC_SIZE, " "UPSWEEP_LOG_LOADS_PER_TILE, " "SPINE_LOG_THREADS, " "SPINE_LOG_LOAD_VEC_SIZE, " "SPINE_LOG_LOADS_PER_TILE, " "DOWNSWEEP_LOG_THREADS, " "DOWNSWEEP_LOG_LOAD_VEC_SIZE, " "DOWNSWEEP_LOG_LOADS_PER_TILE, " "elapsed time (ms), " "throughput (10^9 items/s), " "bandwidth (10^9 B/s)"); if (g_verify) printf(", Correctness"); printf("\n"); // Execute test(s) #if (TUNE_SIZE == 0) || (TUNE_SIZE == 1) { typedef unsigned char T; Sum<T> binary_op; Test<T>(num_elements * 4, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 2) { typedef unsigned short T; Sum<T> binary_op; Test<T>(num_elements * 2, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 4) { typedef unsigned int T; Sum<T> binary_op; Test<T>(num_elements, binary_op, binary_op); } #endif #if (TUNE_SIZE == 0) || (TUNE_SIZE == 8) { typedef unsigned long long T; Sum<T> binary_op; Test<T>(num_elements / 2, binary_op, binary_op); } #endif return 0; }
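Note: the Test() routine above validates each tuned configuration against a host-side exclusive scan built from the same reduction and identity functors. A minimal standalone sketch of that reference computation is shown here for clarity; the function and variable names are illustrative and not taken from the tuner sources.

#include <vector>

// Host reference for an exclusive scan: out[0] = identity, out[i] = op(out[i-1], in[i-1]).
// ReductionOp/IdentityOp mirror the Sum<T> functor defined above (two-argument
// operator() reduces, zero-argument operator() returns the identity).
template <typename T, typename ReductionOp, typename IdentityOp>
std::vector<T> ExclusiveScanReference(const std::vector<T> &in,
                                      ReductionOp reduction_op,
                                      IdentityOp identity_op)
{
    std::vector<T> out(in.size());
    if (out.empty()) return out;
    out[0] = identity_op();
    for (size_t i = 1; i < in.size(); ++i) {
        out[i] = reduction_op(out[i - 1], in[i - 1]);
    }
    return out;
}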
eee1e710b42ed6e0ce32b64bd0b0ce3841a6d083.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * KMeans.cpp * * Created on: 8 feb 2021 * Author: marco */ #include "KMeans.h" #include <cmath> #include <cstdio> #include <cassert> #include <iostream> #include <chrono> static void CheckCudaErrorAux(const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define TOLERANCE 0.005 #define BLOCK_SIZE 384 #define SMALL_BLOCK_SIZE 128 extern const unsigned int channels; static unsigned int dimension; static unsigned int numPoints; static float* h_centroidsCoordinates; __constant__ unsigned int c_dimension; __constant__ unsigned int c_numPoints; extern __constant__ float c_centroidsCoordinates[]; __device__ unsigned int d_newCentroidIndex; __device__ float d_maxMinDistance; __device__ unsigned int cudaLock; extern __device__ unsigned int g_clusterSize[]; extern __device__ float g_clusterSum[]; /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement << " returned " << hipGetErrorString(err) << "("<< err << ") at " << file << ":" << line << std::endl; exit(1); } __host__ SetOfPoints* kMeans(unsigned int k, const SetOfPoints& data) noexcept(false) { // extract points Point* points = data.pointList; if(points == nullptr) { throw invalid_argument("Clusters can't be null"); } // extract size numPoints = data.sizeList; CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_numPoints, &numPoints, sizeof(unsigned int))); if (numPoints < k) { throw length_error("There aren't enough points for k = " + to_string(k)); } // extract dimension dimension = data.pointList[0].dimension; CUDA_CHECK_RETURN(hipMemcpyToSymbol(c_dimension, &dimension, sizeof(unsigned int))); assert(dimension == channels); // initialize linear array on device batching the memory transfers and pinning memory float* pointsCoordinates; CUDA_CHECK_RETURN(hipHostMalloc((void**)&(pointsCoordinates), dimension * numPoints * sizeof(float), hipHostMallocMapped)); for (int p = 0; p < numPoints; p++){ memcpy((void*)&pointsCoordinates[p*dimension], (void*)points[p].coordinates, dimension*sizeof(float)); } float* d_pointsCoordinates; CUDA_CHECK_RETURN(hipMalloc((void**)&d_pointsCoordinates, dimension * numPoints * sizeof(float))); CUDA_CHECK_RETURN(hipMemcpy((void*)d_pointsCoordinates, (void*)pointsCoordinates, dimension * numPoints *sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipHostFree(pointsCoordinates)); // initialize centroids CUDA_CHECK_RETURN(hipGetSymbolAddress((void**)&h_centroidsCoordinates, c_centroidsCoordinates)); initialCentroids(k, d_pointsCoordinates); unsigned int* clusterization; CUDA_CHECK_RETURN(hipMallocManaged(&clusterization, numPoints*sizeof(unsigned int))); hipMemPrefetchAsync(clusterization, numPoints*sizeof(unsigned int), -1, NULL); // for post-Maxwell architectures // unsigned int* d_clusterization; // CUDA_CHECK_RETURN(hipMalloc((void**)&d_clusterization, numPoints * sizeof(unsigned int))); unsigned int* d_clusterSize; CUDA_CHECK_RETURN(hipGetSymbolAddress((void**)&d_clusterSize, g_clusterSize)); float* d_clusterSum; CUDA_CHECK_RETURN(hipGetSymbolAddress((void**)&d_clusterSum, g_clusterSum)); bool stop = false; while (!stop) { updateClusters(k, d_pointsCoordinates, /*d_clusterization*/clusterization, d_clusterSize, d_clusterSum); stop = 
checkStop(k); CUDA_CHECK_RETURN(hipMemcpy((void*)h_centroidsCoordinates, (void*)d_clusterSum, k * dimension * sizeof(float), hipMemcpyDeviceToDevice)); } // unsigned int* clusterization = (unsigned int *) calloc(numPoints, sizeof(unsigned int)); // CUDA_CHECK_RETURN(hipMemcpy((void*)clusterization, (void*)d_clusterization, numPoints * sizeof(unsigned int), hipMemcpyDeviceToHost)); unsigned int* clusterSize = (unsigned int *) calloc(k, sizeof(unsigned int)); CUDA_CHECK_RETURN(hipMemcpy((void*)clusterSize, (void*)d_clusterSize, k * sizeof(unsigned int), hipMemcpyDeviceToHost)); // alloc clusters SetOfPoints* clusters = SetOfPoints_new((Point *) calloc(k, sizeof (Point)), k); unsigned int clusterIndex [k]; for (unsigned int c = 0; c < k; c++) { setAttributes(&(clusters[c]), (Point *) calloc(clusterSize[c], sizeof(Point)), clusterSize[c]); clusterIndex[c] = 0; } for (unsigned int p = 0; p < numPoints; p++) { insertPoint(&(clusters[clusterization[p]]), points[p], clusterIndex[clusterization[p]]); clusterIndex[clusterization[p]]++; } free(clusterSize); CUDA_CHECK_RETURN(hipFree(clusterization)); // free(clusterization); // CUDA_CHECK_RETURN(hipFree(d_clusterization)); CUDA_CHECK_RETURN(hipFree(d_pointsCoordinates)); return clusters; } __global__ void checkStopKernel(unsigned int k, int* d_stop) { // indexes int coordinate = blockIdx.x * blockDim.x + threadIdx.x; // private copies int dimension = c_dimension; // main job if (coordinate < k * dimension) { float actualCoordinate = g_clusterSum[coordinate] / (float) g_clusterSize[coordinate / dimension]; g_clusterSum[coordinate] = actualCoordinate; if (abs(c_centroidsCoordinates[coordinate] - actualCoordinate) > TOLERANCE) { atomicExch(d_stop, 1); } } } __host__ bool checkStop(unsigned int k) { int* stop; CUDA_CHECK_RETURN(hipMallocManaged((void**)&stop, sizeof(int))); static unsigned int gridSize = (k*dimension)/SMALL_BLOCK_SIZE + ((k * dimension) % SMALL_BLOCK_SIZE != 0); // integer ceil hipLaunchKernelGGL(( checkStopKernel), dim3(gridSize), dim3(SMALL_BLOCK_SIZE), 0, 0, k, stop); CUDA_CHECK_RETURN(hipDeviceSynchronize()); bool response = *stop ? 
false : true; CUDA_CHECK_RETURN(hipFree(stop)); return response; } __global__ void updateClustersKernel(unsigned int k, const float* d_pointsCoordinates, unsigned int* d_clusterization) { // indexes unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; // private copies int dimension = c_dimension; int numPoints = c_numPoints; // main job if (index < numPoints) { float minDistance = INFINITY; unsigned int nearestCentroid; float distance; for (unsigned int c = 0; c < k; c++) { distance = getDistanceByCoordinates(&d_pointsCoordinates[index * dimension], &c_centroidsCoordinates[c * dimension], dimension); if (distance < minDistance) { minDistance = distance; nearestCentroid = c; } } d_clusterization[index] = nearestCentroid; atomicAdd(&g_clusterSize[nearestCentroid], 1); for (unsigned int d = 0; d < dimension; d++) { atomicAdd(&g_clusterSum[nearestCentroid * dimension + d], d_pointsCoordinates[index * dimension + d]); } } } __host__ void updateClusters(unsigned int k, float* d_pointsCoordinates, unsigned int* d_clusterization, unsigned int* d_clusterSize, float* d_clusterSum) { CUDA_CHECK_RETURN(hipMemset((void*)d_clusterSize, 0, k * sizeof(unsigned int))); CUDA_CHECK_RETURN(hipMemset((void*)d_clusterSum, 0, k * dimension * sizeof(float))); // blocks-threads organization static unsigned int gridSize = numPoints/BLOCK_SIZE + (numPoints % BLOCK_SIZE != 0); // integer ceil hipLaunchKernelGGL(( updateClustersKernel), dim3(gridSize), dim3(BLOCK_SIZE), k*dimension, 0, k, d_pointsCoordinates, d_clusterization); hipDeviceSynchronize(); } __global__ void maxMinDistanceKernel(unsigned int i, const float* d_pointsCoordinates) { __shared__ float ds_maxMinDistances[BLOCK_SIZE]; __shared__ unsigned int ds_maxMinIndexes[BLOCK_SIZE]; // private copies int dimension = c_dimension; int numPoints = c_numPoints; // indexes unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; // collaborative initialization ds_maxMinDistances[t] = -1; ds_maxMinIndexes[t] = index; // points processing if (index < numPoints) { float minDistance = INFINITY; float distance; for (unsigned int j = 0; j < i; j++) { distance = getDistanceByCoordinates(&d_pointsCoordinates[index * dimension], &c_centroidsCoordinates[j * dimension], dimension); if (distance < minDistance) { minDistance = distance; } } ds_maxMinDistances[t] = minDistance; } // comparing reduction for (unsigned int stride = blockDim.x/2; stride >= 1; stride /= 2) { __syncthreads(); if (t >= stride) { return; // this improves kernel's performance } else { if (ds_maxMinDistances[t] < ds_maxMinDistances[t + stride]) { ds_maxMinDistances[t] = ds_maxMinDistances[t + stride]; ds_maxMinIndexes[t] = ds_maxMinIndexes[t + stride]; } if (stride == 1) { bool blocked = true; while (blocked) { if (0 == atomicCAS(&cudaLock, 0, 1)) { if (d_maxMinDistance < ds_maxMinDistances[0]) { d_maxMinDistance = ds_maxMinDistances[0]; d_newCentroidIndex = ds_maxMinIndexes[0]; } atomicExch(&cudaLock, 0); blocked = false; } } } } } } __host__ void initialCentroids(unsigned int k, float* d_pointsCoordinates) { // first centroid int firstIndex = 0; CUDA_CHECK_RETURN(hipMemcpy((void*)&h_centroidsCoordinates[0 * dimension], (void*)&d_pointsCoordinates[firstIndex * dimension], dimension * sizeof(float), hipMemcpyHostToDevice)); // blocks-threads organization unsigned int gridSize = numPoints/BLOCK_SIZE + (numPoints % BLOCK_SIZE != 0); // integer ceil // kernel call iteration unsigned int h_newCentroidIndex; float h_maxMinDistance; for (unsigned int i = 1; i < k; i++) 
{ h_maxMinDistance = 0; CUDA_CHECK_RETURN(hipMemcpyToSymbol(d_maxMinDistance, &h_maxMinDistance, sizeof(float))); hipLaunchKernelGGL(( maxMinDistanceKernel), dim3(gridSize), dim3(BLOCK_SIZE), 0, 0, i, d_pointsCoordinates); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipMemcpyFromSymbol(&h_newCentroidIndex, d_newCentroidIndex, sizeof(unsigned int))); CUDA_CHECK_RETURN(hipMemcpy((void*)&h_centroidsCoordinates[i * dimension], (void*)&d_pointsCoordinates[h_newCentroidIndex * dimension], dimension * sizeof(float), hipMemcpyHostToDevice)); } }
eee1e710b42ed6e0ce32b64bd0b0ce3841a6d083.cu
/* * KMeans.cpp * * Created on: 8 feb 2021 * Author: marco */ #include "KMeans.h" #include <cmath> #include <cstdio> #include <cassert> #include <iostream> #include <chrono> static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) #define TOLERANCE 0.005 #define BLOCK_SIZE 384 #define SMALL_BLOCK_SIZE 128 extern const unsigned int channels; static unsigned int dimension; static unsigned int numPoints; static float* h_centroidsCoordinates; __constant__ unsigned int c_dimension; __constant__ unsigned int c_numPoints; extern __constant__ float c_centroidsCoordinates[]; __device__ unsigned int d_newCentroidIndex; __device__ float d_maxMinDistance; __device__ unsigned int cudaLock; extern __device__ unsigned int g_clusterSize[]; extern __device__ float g_clusterSum[]; /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement << " returned " << cudaGetErrorString(err) << "("<< err << ") at " << file << ":" << line << std::endl; exit(1); } __host__ SetOfPoints* kMeans(unsigned int k, const SetOfPoints& data) noexcept(false) { // extract points Point* points = data.pointList; if(points == nullptr) { throw invalid_argument("Clusters can't be null"); } // extract size numPoints = data.sizeList; CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_numPoints, &numPoints, sizeof(unsigned int))); if (numPoints < k) { throw length_error("There aren't enough points for k = " + to_string(k)); } // extract dimension dimension = data.pointList[0].dimension; CUDA_CHECK_RETURN(cudaMemcpyToSymbol(c_dimension, &dimension, sizeof(unsigned int))); assert(dimension == channels); // initialize linear array on device batching the memory transfers and pinning memory float* pointsCoordinates; CUDA_CHECK_RETURN(cudaMallocHost((void**)&(pointsCoordinates), dimension * numPoints * sizeof(float), cudaHostAllocMapped)); for (int p = 0; p < numPoints; p++){ memcpy((void*)&pointsCoordinates[p*dimension], (void*)points[p].coordinates, dimension*sizeof(float)); } float* d_pointsCoordinates; CUDA_CHECK_RETURN(cudaMalloc((void**)&d_pointsCoordinates, dimension * numPoints * sizeof(float))); CUDA_CHECK_RETURN(cudaMemcpy((void*)d_pointsCoordinates, (void*)pointsCoordinates, dimension * numPoints *sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaFreeHost(pointsCoordinates)); // initialize centroids CUDA_CHECK_RETURN(cudaGetSymbolAddress((void**)&h_centroidsCoordinates, c_centroidsCoordinates)); initialCentroids(k, d_pointsCoordinates); unsigned int* clusterization; CUDA_CHECK_RETURN(cudaMallocManaged(&clusterization, numPoints*sizeof(unsigned int))); cudaMemPrefetchAsync(clusterization, numPoints*sizeof(unsigned int), -1, NULL); // for post-Maxwell architectures // unsigned int* d_clusterization; // CUDA_CHECK_RETURN(cudaMalloc((void**)&d_clusterization, numPoints * sizeof(unsigned int))); unsigned int* d_clusterSize; CUDA_CHECK_RETURN(cudaGetSymbolAddress((void**)&d_clusterSize, g_clusterSize)); float* d_clusterSum; CUDA_CHECK_RETURN(cudaGetSymbolAddress((void**)&d_clusterSum, g_clusterSum)); bool stop = false; while (!stop) { updateClusters(k, d_pointsCoordinates, /*d_clusterization*/clusterization, d_clusterSize, d_clusterSum); stop = checkStop(k); CUDA_CHECK_RETURN(cudaMemcpy((void*)h_centroidsCoordinates, 
(void*)d_clusterSum, k * dimension * sizeof(float), cudaMemcpyDeviceToDevice)); } // unsigned int* clusterization = (unsigned int *) calloc(numPoints, sizeof(unsigned int)); // CUDA_CHECK_RETURN(cudaMemcpy((void*)clusterization, (void*)d_clusterization, numPoints * sizeof(unsigned int), cudaMemcpyDeviceToHost)); unsigned int* clusterSize = (unsigned int *) calloc(k, sizeof(unsigned int)); CUDA_CHECK_RETURN(cudaMemcpy((void*)clusterSize, (void*)d_clusterSize, k * sizeof(unsigned int), cudaMemcpyDeviceToHost)); // alloc clusters SetOfPoints* clusters = SetOfPoints_new((Point *) calloc(k, sizeof (Point)), k); unsigned int clusterIndex [k]; for (unsigned int c = 0; c < k; c++) { setAttributes(&(clusters[c]), (Point *) calloc(clusterSize[c], sizeof(Point)), clusterSize[c]); clusterIndex[c] = 0; } for (unsigned int p = 0; p < numPoints; p++) { insertPoint(&(clusters[clusterization[p]]), points[p], clusterIndex[clusterization[p]]); clusterIndex[clusterization[p]]++; } free(clusterSize); CUDA_CHECK_RETURN(cudaFree(clusterization)); // free(clusterization); // CUDA_CHECK_RETURN(cudaFree(d_clusterization)); CUDA_CHECK_RETURN(cudaFree(d_pointsCoordinates)); return clusters; } __global__ void checkStopKernel(unsigned int k, int* d_stop) { // indexes int coordinate = blockIdx.x * blockDim.x + threadIdx.x; // private copies int dimension = c_dimension; // main job if (coordinate < k * dimension) { float actualCoordinate = g_clusterSum[coordinate] / (float) g_clusterSize[coordinate / dimension]; g_clusterSum[coordinate] = actualCoordinate; if (abs(c_centroidsCoordinates[coordinate] - actualCoordinate) > TOLERANCE) { atomicExch(d_stop, 1); } } } __host__ bool checkStop(unsigned int k) { int* stop; CUDA_CHECK_RETURN(cudaMallocManaged((void**)&stop, sizeof(int))); static unsigned int gridSize = (k*dimension)/SMALL_BLOCK_SIZE + ((k * dimension) % SMALL_BLOCK_SIZE != 0); // integer ceil checkStopKernel<<<gridSize, SMALL_BLOCK_SIZE>>>(k, stop); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); bool response = *stop ? 
false : true; CUDA_CHECK_RETURN(cudaFree(stop)); return response; } __global__ void updateClustersKernel(unsigned int k, const float* d_pointsCoordinates, unsigned int* d_clusterization) { // indexes unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; // private copies int dimension = c_dimension; int numPoints = c_numPoints; // main job if (index < numPoints) { float minDistance = INFINITY; unsigned int nearestCentroid; float distance; for (unsigned int c = 0; c < k; c++) { distance = getDistanceByCoordinates(&d_pointsCoordinates[index * dimension], &c_centroidsCoordinates[c * dimension], dimension); if (distance < minDistance) { minDistance = distance; nearestCentroid = c; } } d_clusterization[index] = nearestCentroid; atomicAdd(&g_clusterSize[nearestCentroid], 1); for (unsigned int d = 0; d < dimension; d++) { atomicAdd(&g_clusterSum[nearestCentroid * dimension + d], d_pointsCoordinates[index * dimension + d]); } } } __host__ void updateClusters(unsigned int k, float* d_pointsCoordinates, unsigned int* d_clusterization, unsigned int* d_clusterSize, float* d_clusterSum) { CUDA_CHECK_RETURN(cudaMemset((void*)d_clusterSize, 0, k * sizeof(unsigned int))); CUDA_CHECK_RETURN(cudaMemset((void*)d_clusterSum, 0, k * dimension * sizeof(float))); // blocks-threads organization static unsigned int gridSize = numPoints/BLOCK_SIZE + (numPoints % BLOCK_SIZE != 0); // integer ceil updateClustersKernel<<<gridSize, BLOCK_SIZE, k*dimension>>>(k, d_pointsCoordinates, d_clusterization); cudaDeviceSynchronize(); } __global__ void maxMinDistanceKernel(unsigned int i, const float* d_pointsCoordinates) { __shared__ float ds_maxMinDistances[BLOCK_SIZE]; __shared__ unsigned int ds_maxMinIndexes[BLOCK_SIZE]; // private copies int dimension = c_dimension; int numPoints = c_numPoints; // indexes unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; unsigned int t = threadIdx.x; // collaborative initialization ds_maxMinDistances[t] = -1; ds_maxMinIndexes[t] = index; // points processing if (index < numPoints) { float minDistance = INFINITY; float distance; for (unsigned int j = 0; j < i; j++) { distance = getDistanceByCoordinates(&d_pointsCoordinates[index * dimension], &c_centroidsCoordinates[j * dimension], dimension); if (distance < minDistance) { minDistance = distance; } } ds_maxMinDistances[t] = minDistance; } // comparing reduction for (unsigned int stride = blockDim.x/2; stride >= 1; stride /= 2) { __syncthreads(); if (t >= stride) { return; // this improves kernel's performance } else { if (ds_maxMinDistances[t] < ds_maxMinDistances[t + stride]) { ds_maxMinDistances[t] = ds_maxMinDistances[t + stride]; ds_maxMinIndexes[t] = ds_maxMinIndexes[t + stride]; } if (stride == 1) { bool blocked = true; while (blocked) { if (0 == atomicCAS(&cudaLock, 0, 1)) { if (d_maxMinDistance < ds_maxMinDistances[0]) { d_maxMinDistance = ds_maxMinDistances[0]; d_newCentroidIndex = ds_maxMinIndexes[0]; } atomicExch(&cudaLock, 0); blocked = false; } } } } } } __host__ void initialCentroids(unsigned int k, float* d_pointsCoordinates) { // first centroid int firstIndex = 0; CUDA_CHECK_RETURN(cudaMemcpy((void*)&h_centroidsCoordinates[0 * dimension], (void*)&d_pointsCoordinates[firstIndex * dimension], dimension * sizeof(float), cudaMemcpyHostToDevice)); // blocks-threads organization unsigned int gridSize = numPoints/BLOCK_SIZE + (numPoints % BLOCK_SIZE != 0); // integer ceil // kernel call iteration unsigned int h_newCentroidIndex; float h_maxMinDistance; for (unsigned int i = 1; i < k; i++) { h_maxMinDistance = 0; 
CUDA_CHECK_RETURN(cudaMemcpyToSymbol(d_maxMinDistance, &h_maxMinDistance, sizeof(float))); maxMinDistanceKernel<<<gridSize, BLOCK_SIZE>>>(i, d_pointsCoordinates); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaMemcpyFromSymbol(&h_newCentroidIndex, d_newCentroidIndex, sizeof(unsigned int))); CUDA_CHECK_RETURN(cudaMemcpy((void*)&h_centroidsCoordinates[i * dimension], (void*)&d_pointsCoordinates[h_newCentroidIndex * dimension], dimension * sizeof(float), cudaMemcpyHostToDevice)); } }
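Note: the KMeans pair above is a compact illustration of how hipify rewrites kernel launches: the CUDA file uses the triple-chevron syntax, while the generated HIP file calls hipLaunchKernelGGL with explicit dim3 arguments and the shared-memory and stream slots spelled out. A reduced sketch of the same mapping on a trivial kernel follows; the kernel, sizes, and launchScale helper are illustrative assumptions, not part of either file.

#include <cuda_runtime.h>

__global__ void scaleKernel(float *data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launchScale(float *d_data, float factor, int n) {
    const int block = 384;                          // same BLOCK_SIZE idiom as above
    const int grid = n / block + (n % block != 0);  // integer ceil, as in updateClusters()
    // CUDA form (as in the .cu listing above):
    scaleKernel<<<grid, block, 0, 0>>>(d_data, factor, n);
    // HIP form produced by hipify (as in the .hip listing above):
    //   hipLaunchKernelGGL(scaleKernel, dim3(grid), dim3(block), 0, 0, d_data, factor, n);
    cudaDeviceSynchronize();
}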
c0a59f2d48f66f7b72a079d6deef55aebbaaaa98.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void GPUAdd(float *array1, float *array2, float *result, int WIDTH) { int i = blockDim.x * blockIdx.x + threadIdx.x; result[i] = array1[i] + array2[i]; }
c0a59f2d48f66f7b72a079d6deef55aebbaaaa98.cu
#include "includes.h" __global__ void GPUAdd(float *array1, float *array2, float *result, int WIDTH) { int i = blockDim.x * blockIdx.x + threadIdx.x; result[i] = array1[i] + array2[i]; }
ae31b17b35ebbecf09746a6ae69ea7f2213b7da6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Coalesced memory access
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
typedef struct {
    double A1;
    double A2;
    double A3;
    double A4;
}stru;
__global__ void multi(stru *A,stru *b,double *C,const int Max){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    double sum=0.0;
    if(idx<Max && idy<Max && idx==idy){
        for(int i=0;i<Max/4;i++){
            sum+=A[idx*Max/4+i].A1*b[i*4].A1+A[idx*Max/4+i].A2*b[i*4+1].A1+A[idx*Max/4+i].A3*b[i*4+2].A1+A[idx*Max/4+i].A4*b[i*4+3].A1;
        }
        C[idx]=sum;
    }
}
int main(){
    printf("Coalesced memory access:\n");
    stru *A =(stru *)malloc(Max * Max/4 * sizeof(stru)); //A
    stru *b =(stru *)malloc(Max * sizeof(stru)); //b
    double *C =(double *)malloc(Max * sizeof(double)); //C
    double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
    int i,j;
    for(i=0;i<Max;i++){
        for(j=0;j<Max/4;j++){
            A[i*Max/4+j].A1=i-0.1*j*4+1;
            A[i*Max/4+j].A2=i-(0.1*j*4+1)+1;
            A[i*Max/4+j].A3=i-(0.1*j*4+2)+1;
            A[i*Max/4+j].A4=i-(0.1*j*4+3)+1;
        }
    }
    for(i=0;i<Max;i++){
        b[i].A1=log(sqrt(i*i-i+2));
        b[i].A2=0.0;
        b[i].A3=0.0;
        b[i].A4=0.0;
        C[i]=0.0;
    }
    stru *A_d,*b_d;
    double *C_d;
    hipMalloc((void **)&A_d,Max * Max/4 * sizeof(stru));
    hipMalloc((void **)&b_d,Max *sizeof(stru));
    hipMalloc((void **)&C_d,Max *sizeof(double));
    clock_t start,end;
    start=clock();
    hipMemcpy(A_d, A,Max*Max/4*sizeof(stru),hipMemcpyHostToDevice);
    hipMemcpy(b_d, b,Max*sizeof(stru),hipMemcpyHostToDevice);
    hipMemcpy(C_d, C,Max * sizeof(double), hipMemcpyHostToDevice);
    dim3 block(width,width);
    dim3 grid(Max/block.x, Max/block.y);
    hipLaunchKernelGGL(( multi), dim3(grid),dim3(block), 0, 0, A_d,b_d,C_d,Max);
    hipMemcpy(C, C_d, Max * sizeof(double), hipMemcpyDeviceToHost);
    end=clock();
    double time=(end-start)*1000/CLOCKS_PER_SEC;
    //check result:
    //cpu:
    clock_t start_c,end_c;
    start_c=clock();
    for (int i = 0; i < Max; ++i){
        for (int j = 0; j < Max/4; ++j) {
            test_c[i]+=A[i*Max/4+j].A1*b[j*4].A1+A[i*Max/4+j].A2*b[j*4+1].A1+A[i*Max/4+j].A3*b[j*4+2].A1+A[i*Max/4+j].A4*b[j*4+3].A1;
        }
    }
    end_c=clock();
    bool flag = true;
    for (int i = 0; i < Max; ++i){
        float a=test_c[i];
        float b=C[i];
        if (a!=b) {
            printf("cpu:%lf gpu:%lf\n",a,b);
            flag = false;
        }
    }
    if (flag == true)
        printf("result correct\n");
    else{
        printf("result wrong\n");
    }
    double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
    printf("GPU TIME:%lf ms\n",time);
    printf("CPU TIME:%lf ms\n",time_C);
    hipFree(A_d);
    hipFree(b_d);
    hipFree(C_d);
    free(A);
    free(b);
    free(C);
}
ae31b17b35ebbecf09746a6ae69ea7f2213b7da6.cu
//Coalesced memory access
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
typedef struct {
    double A1;
    double A2;
    double A3;
    double A4;
}stru;
__global__ void multi(stru *A,stru *b,double *C,const int Max){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    double sum=0.0;
    if(idx<Max && idy<Max && idx==idy){
        for(int i=0;i<Max/4;i++){
            sum+=A[idx*Max/4+i].A1*b[i*4].A1+A[idx*Max/4+i].A2*b[i*4+1].A1+A[idx*Max/4+i].A3*b[i*4+2].A1+A[idx*Max/4+i].A4*b[i*4+3].A1;
        }
        C[idx]=sum;
    }
}
int main(){
    printf("Coalesced memory access:\n");
    stru *A =(stru *)malloc(Max * Max/4 * sizeof(stru)); //A
    stru *b =(stru *)malloc(Max * sizeof(stru)); //b
    double *C =(double *)malloc(Max * sizeof(double)); //C
    double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
    int i,j;
    for(i=0;i<Max;i++){
        for(j=0;j<Max/4;j++){
            A[i*Max/4+j].A1=i-0.1*j*4+1;
            A[i*Max/4+j].A2=i-(0.1*j*4+1)+1;
            A[i*Max/4+j].A3=i-(0.1*j*4+2)+1;
            A[i*Max/4+j].A4=i-(0.1*j*4+3)+1;
        }
    }
    for(i=0;i<Max;i++){
        b[i].A1=log(sqrt(i*i-i+2));
        b[i].A2=0.0;
        b[i].A3=0.0;
        b[i].A4=0.0;
        C[i]=0.0;
    }
    stru *A_d,*b_d;
    double *C_d;
    cudaMalloc((void **)&A_d,Max * Max/4 * sizeof(stru));
    cudaMalloc((void **)&b_d,Max *sizeof(stru));
    cudaMalloc((void **)&C_d,Max *sizeof(double));
    clock_t start,end;
    start=clock();
    cudaMemcpy(A_d, A,Max*Max/4*sizeof(stru),cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b,Max*sizeof(stru),cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C,Max * sizeof(double), cudaMemcpyHostToDevice);
    dim3 block(width,width);
    dim3 grid(Max/block.x, Max/block.y);
    multi<<<grid,block>>>(A_d,b_d,C_d,Max);
    cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
    end=clock();
    double time=(end-start)*1000/CLOCKS_PER_SEC;
    //check result:
    //cpu:
    clock_t start_c,end_c;
    start_c=clock();
    for (int i = 0; i < Max; ++i){
        for (int j = 0; j < Max/4; ++j) {
            test_c[i]+=A[i*Max/4+j].A1*b[j*4].A1+A[i*Max/4+j].A2*b[j*4+1].A1+A[i*Max/4+j].A3*b[j*4+2].A1+A[i*Max/4+j].A4*b[j*4+3].A1;
        }
    }
    end_c=clock();
    bool flag = true;
    for (int i = 0; i < Max; ++i){
        float a=test_c[i];
        float b=C[i];
        if (a!=b) {
            printf("cpu:%lf gpu:%lf\n",a,b);
            flag = false;
        }
    }
    if (flag == true)
        printf("result correct\n");
    else{
        printf("result wrong\n");
    }
    double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
    printf("GPU TIME:%lf ms\n",time);
    printf("CPU TIME:%lf ms\n",time_C);
    cudaFree(A_d);
    cudaFree(b_d);
    cudaFree(C_d);
    free(A);
    free(b);
    free(C);
}
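Note: both versions of the program above verify the GPU result by casting the double sums to float and comparing with !=, which is a brittle check for floating-point arithmetic. A small sketch of a relative-tolerance comparison that could serve the same purpose is given here; the epsilon value is an arbitrary illustrative choice, not taken from the original code.

#include <cmath>

// Relative-tolerance comparison for floating-point results.
bool nearlyEqual(double cpu, double gpu, double eps = 1e-9) {
    double diff  = std::fabs(cpu - gpu);
    double scale = std::fmax(std::fabs(cpu), std::fabs(gpu));
    return diff <= eps * std::fmax(scale, 1.0);
}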
74f982985b032b7ffa9612170a60498785572c04.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cassert> #include <iostream> #include <limits> #include <random> #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" using namespace cms::cuda; template <typename T> void go() { std::mt19937 eng; std::uniform_int_distribution<T> rgen(std::numeric_limits<T>::min(), std::numeric_limits<T>::max()); constexpr int N = 12000; T v[N]; auto v_d = make_device_unique<T[]>(N, nullptr); cudaCheck(hipMemcpy(v_d.get(), v, N * sizeof(T), hipMemcpyHostToDevice)); constexpr uint32_t nParts = 10; constexpr uint32_t partSize = N / nParts; uint32_t offsets[nParts + 1]; using Hist = HistoContainer<T, 128, -1, 8 * sizeof(T), uint32_t, nParts>; std::cout << "HistoContainer " << (int)(offsetof(Hist, off)) << ' ' << Hist::nbins() << ' ' << Hist::totbins() << ' ' << Hist::ctCapacity() << ' ' << offsetof(Hist, content) - offsetof(Hist, off) << ' ' << (std::numeric_limits<T>::max() - std::numeric_limits<T>::min()) / Hist::nbins() << std::endl; Hist h; uint32_t mem[N]; auto h_d = make_device_unique<Hist[]>(1, nullptr); auto h_s = make_device_unique<uint32_t[]>(N, nullptr); // auto h_s = make_device_unique<Hist::index_type[]>(N, nullptr); auto off_d = make_device_unique<uint32_t[]>(nParts + 1, nullptr); for (int it = 0; it < 5; ++it) { offsets[0] = 0; for (uint32_t j = 1; j < nParts + 1; ++j) { offsets[j] = offsets[j - 1] + partSize - 3 * j; assert(offsets[j] <= N); } if (it == 1) { // special cases... offsets[0] = 0; offsets[1] = 0; offsets[2] = 19; offsets[3] = 32 + offsets[2]; offsets[4] = 123 + offsets[3]; offsets[5] = 256 + offsets[4]; offsets[6] = 311 + offsets[5]; offsets[7] = 2111 + offsets[6]; offsets[8] = 256 * 11 + offsets[7]; offsets[9] = 44 + offsets[8]; offsets[10] = 3297 + offsets[9]; assert(offsets[10] <= N); } cudaCheck(hipMemcpy(off_d.get(), offsets, 4 * (nParts + 1), hipMemcpyHostToDevice)); for (long long j = 0; j < N; j++) v[j] = rgen(eng); if (it == 2) { // big bin for (long long j = 1000; j < 2000; j++) v[j] = sizeof(T) == 1 ? 22 : 3456; } cudaCheck(hipMemcpy(v_d.get(), v, N * sizeof(T), hipMemcpyHostToDevice)); fillManyFromVector(h_d.get(), nParts, v_d.get(), off_d.get(), offsets[10], 256, h_s.get(), 0); cudaCheck(hipMemcpy(&h, h_d.get(), sizeof(Hist), hipMemcpyDeviceToHost)); assert(h.capacity() == offsets[10]); // get content cudaCheck(hipMemcpy(mem, h_s.get(), N * sizeof(uint32_t), hipMemcpyDeviceToHost)); typename Hist::View view = {&h, nullptr, mem, -1, N}; // plug correct content h.initStorage(view); assert(0 == h.off[0]); assert(offsets[10] == h.size()); auto verify = [&](uint32_t i, uint32_t k, uint32_t t1, uint32_t t2) { assert(t1 < N); assert(t2 < N); if (T(v[t1] - v[t2]) <= 0) std::cout << "for " << i << ':' << v[k] << " failed " << v[t1] << ' ' << v[t2] << std::endl; }; auto incr = [](auto& k) { return k = (k + 1) % Hist::nbins(); }; // make sure it spans 3 bins... 
auto window = T(1300); for (uint32_t j = 0; j < nParts; ++j) { auto off = Hist::histOff(j); for (uint32_t i = 0; i < Hist::nbins(); ++i) { auto ii = i + off; if (0 == h.size(ii)) continue; auto k = *h.begin(ii); if (j % 2) k = *(h.begin(ii) + (h.end(ii) - h.begin(ii)) / 2); auto bk = h.bin(v[k]); assert(bk == i); assert(k < offsets[j + 1]); auto kl = h.bin(v[k] - window); auto kh = h.bin(v[k] + window); assert(kl != i); assert(kh != i); // std::cout << kl << ' ' << kh << std::endl; auto me = v[k]; auto tot = 0; auto nm = 0; bool l = true; auto khh = kh; incr(khh); for (auto kk = kl; kk != khh; incr(kk)) { if (kk != kl && kk != kh) nm += h.size(kk + off); for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) { if (::min(std::abs(T(v[*p] - me)), std::abs(T(me - v[*p]))) > window) { } else { ++tot; } } if (kk == i) { l = false; continue; } if (l) for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) verify(i, k, k, (*p)); else for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) verify(i, k, (*p), k); } if (!(tot >= nm)) { std::cout << "too bad " << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm << std::endl; } if (l) std::cout << "what? " << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm << std::endl; assert(!l); } } } } int main() { cms::cudatest::requireDevices(); go<int16_t>(); go<int8_t>(); return 0; }
74f982985b032b7ffa9612170a60498785572c04.cu
#include <algorithm> #include <cassert> #include <iostream> #include <limits> #include <random> #include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h" #include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h" #include "HeterogeneousCore/CUDAUtilities/interface/HistoContainer.h" #include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h" using namespace cms::cuda; template <typename T> void go() { std::mt19937 eng; std::uniform_int_distribution<T> rgen(std::numeric_limits<T>::min(), std::numeric_limits<T>::max()); constexpr int N = 12000; T v[N]; auto v_d = make_device_unique<T[]>(N, nullptr); cudaCheck(cudaMemcpy(v_d.get(), v, N * sizeof(T), cudaMemcpyHostToDevice)); constexpr uint32_t nParts = 10; constexpr uint32_t partSize = N / nParts; uint32_t offsets[nParts + 1]; using Hist = HistoContainer<T, 128, -1, 8 * sizeof(T), uint32_t, nParts>; std::cout << "HistoContainer " << (int)(offsetof(Hist, off)) << ' ' << Hist::nbins() << ' ' << Hist::totbins() << ' ' << Hist::ctCapacity() << ' ' << offsetof(Hist, content) - offsetof(Hist, off) << ' ' << (std::numeric_limits<T>::max() - std::numeric_limits<T>::min()) / Hist::nbins() << std::endl; Hist h; uint32_t mem[N]; auto h_d = make_device_unique<Hist[]>(1, nullptr); auto h_s = make_device_unique<uint32_t[]>(N, nullptr); // auto h_s = make_device_unique<Hist::index_type[]>(N, nullptr); auto off_d = make_device_unique<uint32_t[]>(nParts + 1, nullptr); for (int it = 0; it < 5; ++it) { offsets[0] = 0; for (uint32_t j = 1; j < nParts + 1; ++j) { offsets[j] = offsets[j - 1] + partSize - 3 * j; assert(offsets[j] <= N); } if (it == 1) { // special cases... offsets[0] = 0; offsets[1] = 0; offsets[2] = 19; offsets[3] = 32 + offsets[2]; offsets[4] = 123 + offsets[3]; offsets[5] = 256 + offsets[4]; offsets[6] = 311 + offsets[5]; offsets[7] = 2111 + offsets[6]; offsets[8] = 256 * 11 + offsets[7]; offsets[9] = 44 + offsets[8]; offsets[10] = 3297 + offsets[9]; assert(offsets[10] <= N); } cudaCheck(cudaMemcpy(off_d.get(), offsets, 4 * (nParts + 1), cudaMemcpyHostToDevice)); for (long long j = 0; j < N; j++) v[j] = rgen(eng); if (it == 2) { // big bin for (long long j = 1000; j < 2000; j++) v[j] = sizeof(T) == 1 ? 22 : 3456; } cudaCheck(cudaMemcpy(v_d.get(), v, N * sizeof(T), cudaMemcpyHostToDevice)); fillManyFromVector(h_d.get(), nParts, v_d.get(), off_d.get(), offsets[10], 256, h_s.get(), 0); cudaCheck(cudaMemcpy(&h, h_d.get(), sizeof(Hist), cudaMemcpyDeviceToHost)); assert(h.capacity() == offsets[10]); // get content cudaCheck(cudaMemcpy(mem, h_s.get(), N * sizeof(uint32_t), cudaMemcpyDeviceToHost)); typename Hist::View view = {&h, nullptr, mem, -1, N}; // plug correct content h.initStorage(view); assert(0 == h.off[0]); assert(offsets[10] == h.size()); auto verify = [&](uint32_t i, uint32_t k, uint32_t t1, uint32_t t2) { assert(t1 < N); assert(t2 < N); if (T(v[t1] - v[t2]) <= 0) std::cout << "for " << i << ':' << v[k] << " failed " << v[t1] << ' ' << v[t2] << std::endl; }; auto incr = [](auto& k) { return k = (k + 1) % Hist::nbins(); }; // make sure it spans 3 bins... 
auto window = T(1300); for (uint32_t j = 0; j < nParts; ++j) { auto off = Hist::histOff(j); for (uint32_t i = 0; i < Hist::nbins(); ++i) { auto ii = i + off; if (0 == h.size(ii)) continue; auto k = *h.begin(ii); if (j % 2) k = *(h.begin(ii) + (h.end(ii) - h.begin(ii)) / 2); auto bk = h.bin(v[k]); assert(bk == i); assert(k < offsets[j + 1]); auto kl = h.bin(v[k] - window); auto kh = h.bin(v[k] + window); assert(kl != i); assert(kh != i); // std::cout << kl << ' ' << kh << std::endl; auto me = v[k]; auto tot = 0; auto nm = 0; bool l = true; auto khh = kh; incr(khh); for (auto kk = kl; kk != khh; incr(kk)) { if (kk != kl && kk != kh) nm += h.size(kk + off); for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) { if (std::min(std::abs(T(v[*p] - me)), std::abs(T(me - v[*p]))) > window) { } else { ++tot; } } if (kk == i) { l = false; continue; } if (l) for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) verify(i, k, k, (*p)); else for (auto p = h.begin(kk + off); p < h.end(kk + off); ++p) verify(i, k, (*p), k); } if (!(tot >= nm)) { std::cout << "too bad " << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm << std::endl; } if (l) std::cout << "what? " << j << ' ' << i << ' ' << int(me) << '/' << (int)T(me - window) << '/' << (int)T(me + window) << ": " << kl << '/' << kh << ' ' << khh << ' ' << tot << '/' << nm << std::endl; assert(!l); } } } } int main() { cms::cudatest::requireDevices(); go<int16_t>(); go<int8_t>(); return 0; }
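// --- Standalone sketch (not part of the HistoContainer test pair above) -------------
// The test's "make sure it spans 3 bins" comment relies on bin-width arithmetic: with
// 128 bins over the full range of T, the bin width is 2^(8*sizeof(T)) / 128, and the
// window T(1300) (which truncates to 20 for int8_t) is at least one full bin wide, so
// for these window sizes v - window and v + window cannot fall in the same bin as v,
// which is what lets the test assert kl != i and kh != i. All names below are local
// to this sketch and only mirror the test's constants.
#include <cstdint>
#include <iostream>

template <typename T>
void checkWindow() {
  constexpr int nbins = 128;                     // Hist::nbins() in the test above
  constexpr long range = 1L << (8 * sizeof(T));  // full range of T
  constexpr long binWidth = range / nbins;       // 512 for int16_t, 2 for int8_t
  const T window = T(1300);                      // truncates to 20 for int8_t
  std::cout << "binWidth=" << binWidth << " window=" << long(window)
            << " crosses into a neighbouring bin: " << (long(window) >= binWidth) << '\n';
}

int main() {  // minimal driver for the sketch
  checkWindow<int16_t>();
  checkWindow<int8_t>();
  return 0;
}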
ee2a02d9652ff096c975e0611c3c8246c3062cff.hip
// !!! This is a file automatically generated by hipify!!! // Jordan Cazamias // CUDA World Gen 2015 #include <iostream> #include <ctime> #include <stdlib.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" using namespace std; __global__ void AddInts(int *a, int *b, int count) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < count) { a[id] += b[id]; } //printf("id: %d\n", id); } /* int main() { srand(time(NULL)); int count = 100; int *h_a = new int[count]; int *h_b = new int[count]; for (int i = 0; i < count; i++) { h_a[i] = rand() % 1000; h_b[i] = rand() % 1000; } cout << "Prior to addition:" << endl; for (int i = 0; i < 5; i++) { cout << i << ": " << h_a[i] << " " << h_b[i] << endl; } int *d_a, *d_b; if (hipMalloc(&d_a, sizeof(int) * count) != hipSuccess) { cout << "CUDA Malloc failed!"; return 1; } if (hipMalloc(&d_b, sizeof(int) * count) != hipSuccess) { cout << "CUDA Malloc failed!"; hipFree(d_a); return 1; } if (hipMemcpy(d_a, h_a, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess) { cout << "CUDA copy to device failed!"; hipFree(d_a); hipFree(d_b); return 1; } if (hipMemcpy(d_b, h_b, sizeof(int)*count, hipMemcpyHostToDevice) != hipSuccess) { cout << "CUDA copy to device failed!"; hipFree(d_a); hipFree(d_b); return 1; } // Add integers together int blocks = count / 256 + 1; int threads = 256; AddInts<<<blocks, threads>>>(d_a, d_b, count); if (hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess) { cout << "CUDA copy to host failed!"; hipFree(d_a); hipFree(d_b); return 1; } if (hipMemcpy(h_a, d_a, sizeof(int)*count, hipMemcpyDeviceToHost) != hipSuccess) { cout << "CUDA copy to host failed!"; hipFree(d_a); hipFree(d_b); return 1; } for (int i = 0; i < 5; i++) { cout << "Ans: " << h_a[i] << endl; } delete[] h_a; delete[] h_b; return 0; } */
ee2a02d9652ff096c975e0611c3c8246c3062cff.cu
// Jordan Cazamias // CUDA World Gen 2015 #include <iostream> #include <ctime> #include <stdlib.h> #include "cuda_runtime.h" #include "cuda_runtime_api.h" using namespace std; __global__ void AddInts(int *a, int *b, int count) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < count) { a[id] += b[id]; } //printf("id: %d\n", id); } /* int main() { srand(time(NULL)); int count = 100; int *h_a = new int[count]; int *h_b = new int[count]; for (int i = 0; i < count; i++) { h_a[i] = rand() % 1000; h_b[i] = rand() % 1000; } cout << "Prior to addition:" << endl; for (int i = 0; i < 5; i++) { cout << i << ": " << h_a[i] << " " << h_b[i] << endl; } int *d_a, *d_b; if (cudaMalloc(&d_a, sizeof(int) * count) != cudaSuccess) { cout << "CUDA Malloc failed!"; return 1; } if (cudaMalloc(&d_b, sizeof(int) * count) != cudaSuccess) { cout << "CUDA Malloc failed!"; cudaFree(d_a); return 1; } if (cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "CUDA copy to device failed!"; cudaFree(d_a); cudaFree(d_b); return 1; } if (cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "CUDA copy to device failed!"; cudaFree(d_a); cudaFree(d_b); return 1; } // Add integers together int blocks = count / 256 + 1; int threads = 256; AddInts<<<blocks, threads>>>(d_a, d_b, count); if (cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess) { cout << "CUDA copy to host failed!"; cudaFree(d_a); cudaFree(d_b); return 1; } if (cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess) { cout << "CUDA copy to host failed!"; cudaFree(d_a); cudaFree(d_b); return 1; } for (int i = 0; i < 5; i++) { cout << "Ans: " << h_a[i] << endl; } delete[] h_a; delete[] h_b; return 0; } */
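// --- Minimal sketch (not part of the AddInts pair above) ----------------------------
// The commented-out main above sizes the grid as count/256 + 1, which is correct but
// launches one extra, fully guarded block whenever count is an exact multiple of 256.
// The usual ceiling-division idiom is shown below; AddIntsSketch and all other names
// are local to this sketch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void AddIntsSketch(int* a, const int* b, int count) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < count) a[id] += b[id];  // guard still needed for the last partial block
}

int main() {
  const int count = 1000;
  int *d_a = nullptr, *d_b = nullptr;
  cudaMalloc(&d_a, count * sizeof(int));
  cudaMalloc(&d_b, count * sizeof(int));
  cudaMemset(d_a, 0, count * sizeof(int));
  cudaMemset(d_b, 0, count * sizeof(int));
  const int threads = 256;
  const int blocks = (count + threads - 1) / threads;  // ceil(count / threads)
  AddIntsSketch<<<blocks, threads>>>(d_a, d_b, count);
  printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaDeviceSynchronize();
  cudaFree(d_a);
  cudaFree(d_b);
  return 0;
}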
c086d7eb80ad92020138f32ee9c8d816083e837d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "copy.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *src = NULL; hipMalloc(&src, XSIZE*YSIZE); int *dest = NULL; hipMalloc(&dest, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( copy), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dest); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( copy), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dest); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( copy), dim3(gridBlock),dim3(threadBlock), 0, 0, src,dest); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c086d7eb80ad92020138f32ee9c8d816083e837d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "copy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *src = NULL; cudaMalloc(&src, XSIZE*YSIZE); int *dest = NULL; cudaMalloc(&dest, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); copy<<<gridBlock,threadBlock>>>(src,dest); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { copy<<<gridBlock,threadBlock>>>(src,dest); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { copy<<<gridBlock,threadBlock>>>(src,dest); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
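// --- Sketch (not part of the benchmark pair above) ----------------------------------
// The timed loop above reads steady_clock immediately after enqueueing 1000 launches,
// with no synchronize before taking the end timestamp, so the host clock mostly
// captures launch/enqueue overhead rather than device execution time. Below is an
// event-based variant; copyKernel is a trivial stand-in for the copy.cu kernel
// included above (whose definition is not shown here), and all names are local to
// this sketch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void copyKernel(const int* src, int* dest, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dest[i] = src[i];
}

int main() {
  const int n = 1 << 20;
  int *src = nullptr, *dest = nullptr;
  cudaMalloc(&src, n * sizeof(int));
  cudaMalloc(&dest, n * sizeof(int));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  const dim3 block(256), grid((n + 255) / 256);
  copyKernel<<<grid, block>>>(src, dest, n);  // warm-up launch
  cudaEventRecord(start);
  for (int i = 0; i < 1000; ++i) copyKernel<<<grid, block>>>(src, dest, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                 // wait for device completion, not just enqueue
  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("1000 launches: %.3f ms of device time\n", ms);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(src);
  cudaFree(dest);
  return 0;
}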
b8219c37311374e8b6b8072cac6e766820fe0a67.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <fstream> #include <string> #include <cmath> #include <hip/hip_runtime.h> //use INSTANTIATE when kernel is called from other files #define INSTANTIATE_LAYER_GPU_FORWARD(func) \ template __global__ void func<float>(float* d_out, const float* d_in, \ const float* d_kernel, const float* d_kernel_bias); \ template __global__ void func<double>(double* d_out, const double* d_in, \ const double* d_kernel, const double* d_kernel_bias); using namespace std; template <typename Dtype> int ConvKernel(string name, Dtype* d_out, const Dtype* d_in, const Dtype* d_kernel, const Dtype* d_kernel_bias){ cout << "Calling Conv Kernel " << name << endl; cerr<<"No Matching Convolution Code"<<endl; return -1; } template int ConvKernel<float>(string name, float* d_out, const float* d_in, const float* d_kernel, const float* d_kernel_bias); template int ConvKernel<double>(string name, double* d_out, const double* d_in, const double* d_kernel, const double* d_kernel_bias);
b8219c37311374e8b6b8072cac6e766820fe0a67.cu
#include <stdio.h> #include <iostream> #include <fstream> #include <string> #include <cmath> #include <cuda_runtime.h> //use INSTANTIATE when kernel is called from other files #define INSTANTIATE_LAYER_GPU_FORWARD(func) \ template __global__ void func<float>(float* d_out, const float* d_in, \ const float* d_kernel, const float* d_kernel_bias); \ template __global__ void func<double>(double* d_out, const double* d_in, \ const double* d_kernel, const double* d_kernel_bias); using namespace std; template <typename Dtype> int ConvKernel(string name, Dtype* d_out, const Dtype* d_in, const Dtype* d_kernel, const Dtype* d_kernel_bias){ cout << "Calling Conv Kernel " << name << endl; cerr<<"No Matching Convolution Code"<<endl; return -1; } template int ConvKernel<float>(string name, float* d_out, const float* d_in, const float* d_kernel, const float* d_kernel_bias); template int ConvKernel<double>(string name, double* d_out, const double* d_in, const double* d_kernel, const double* d_kernel_bias);
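// --- Sketch (not part of the ConvKernel pair above) ---------------------------------
// The file above defines a templated host fallback and then explicitly instantiates
// ConvKernel<float> and ConvKernel<double>. A consuming translation unit therefore
// only needs a matching declaration, as sketched below; the caller itself is
// hypothetical and not part of the repository.
#include <string>

// Declaration only; the definition and its explicit instantiations live in the file above.
template <typename Dtype>
int ConvKernel(std::string name, Dtype* d_out, const Dtype* d_in,
               const Dtype* d_kernel, const Dtype* d_kernel_bias);

int main() {
  float* dummy = nullptr;
  // The fallback above prints "No Matching Convolution Code" and returns -1.
  return ConvKernel<float>("conv1", dummy, dummy, dummy, dummy) == -1 ? 0 : 1;
}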
00f1972302402e0d9cd1380bc3184df78c211437.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * [email protected] or [email protected] * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ #ifndef __MRLIB_CU__ #define __MRLIB_CU__ #include "MarsInc.h" #include "map.h" #include "reduce.h" #include <helper_cuda.h> //---------------------------------------------- //Get default runtime configuration // //return: default spec //---------------------------------------------- Spec_t *GetDefaultSpec() { Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t)); if (NULL == spec) exit(-1); memset(spec, 0, sizeof(Spec_t)); return spec; } //-------------------------------------------------------- //Initiate map reduce spec //-------------------------------------------------------- void InitMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->dimBlockMap <= 0) g_spec->dimBlockMap = DEFAULT_DIMBLOCK; if (g_spec->dimBlockReduce <= 0) g_spec->dimBlockReduce = DEFAULT_DIMBLOCK; if (g_spec->numRecTaskReduce <= 0) g_spec->numRecTaskReduce = DEFAULT_NUMTASK; if (g_spec->numRecTaskMap <= 0) g_spec->numRecTaskMap = DEFAULT_NUMTASK; if (g_spec->workflow <= 0) g_spec->workflow = MAP_ONLY; } //-------------------------------------------------- //Add a map input record // //param : spec //param : key -- a pointer to a buffer //param : val -- a pointer to a buffer //param : keySize //param : valSize //-------------------------------------------------- void AddMapInputRecord(Spec_t* spec, void* key, void* val, int keySize, int valSize) { assert(NULL != spec); static int2 curOffset; static int3 curChunkNum; int index = spec->inputRecordCount; const int dataChunkSize = 1024*1024*256; if (spec->inputRecordCount > 0) { if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize)) spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize); memcpy(spec->inputKeys+curOffset.x, key, keySize); if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize)) spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize); memcpy(spec->inputVals+curOffset.y, val, valSize); if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4)) spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes, (++curChunkNum.z)*dataChunkSize); } else { spec->inputKeys = (char*)malloc(dataChunkSize); if (NULL == spec->inputKeys) exit(-1); memcpy(spec->inputKeys, key, keySize); spec->inputVals = (char*)malloc(dataChunkSize); if (NULL == spec->inputVals) exit(-1); memcpy(spec->inputVals, val, valSize); spec->inputOffsetSizes = (int4*)malloc(dataChunkSize); curChunkNum.x++; curChunkNum.y++; curChunkNum.z++; } spec->inputOffsetSizes[index].x = curOffset.x; spec->inputOffsetSizes[index].y = keySize; 
spec->inputOffsetSizes[index].z = curOffset.y; spec->inputOffsetSizes[index].w = valSize; curOffset.x += keySize; curOffset.y += valSize; spec->inputRecordCount++; } //------------------------------------------------- //Called by user defined map_count function // //param : keySize //param : valSize //param : interKeysSizePerTask //param : interValsSizePerTask //param : interCountPerTask //------------------------------------------------- __device__ void EmitInterCount(int keySize, int valSize, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask) { int index = TID; interKeysSizePerTask[index] += keySize; interValsSizePerTask[index] += valSize; interCountPerTask[index]++; } //------------------------------------------------- //called by user defined map function // //------------------------------------------------- __device__ void EmitIntermediate(void* key, void* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; int2 l_keyValOffsets = keyValOffsets[index]; char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x); char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y); char* sKey = (char*)key; char* sVal = (char*)val; for (int i = 0; i < keySize; ++i) pKeySet[i] = sKey[i]; for (int i = 0; i < valSize; ++i) pValSet[i] = sVal[i]; l_keyValOffsets.x += keySize; l_keyValOffsets.y += valSize; keyValOffsets[index] = l_keyValOffsets; int l_curIndex = curIndex[index]; int l_psCounts = psCounts[index]; int l_curPs = l_curIndex + l_psCounts; int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs]; int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1]; if (l_curIndex != 0) { l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y); l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w); } l_interOffsetSizes1.y = keySize; l_interOffsetSizes1.w = valSize; interOffsetSizes[l_curPs] = l_interOffsetSizes1; ++l_curIndex; curIndex[index] = l_curIndex; } //------------------------------------------------- //Calculate intermediate data's size // //param : inputKeys //param : inputVals //param : inputOffsetSizes //param : interKeysSizesPerTask //param : interValsSizePerTask //param : interCountPerTask //param : recordNum -- total number of records //param : recordsPerTask //------------------------------------------------- __global__ void MapperCount(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map_count(key, val, offsetSize.y, offsetSize.w, interKeysSizePerTask, interValsSizePerTask, interCountPerTask); } } //-------------------------------------------------- //mapper //-------------------------------------------------- __global__ void Mapper(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* psKeySizes, int* psValSizes, 
int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; int l_psCounts = psCounts[index]; int4 l_interOffsetSizes = interOffsetSizes[l_psCounts]; l_interOffsetSizes.x = psKeySizes[index]; l_interOffsetSizes.z = psValSizes[index]; interOffsetSizes[l_psCounts] = l_interOffsetSizes; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map(key, val, offsetSize.y, offsetSize.w, psKeySizes, psValSizes, psCounts, keyValOffsets, interKeys, interVals, interOffsetSizes, curIndex); } } //-------------------------------------------------- //start map // //1, get map input data on host //2, upload map input data to device memory // (keys, vals, keyOffsets, valOffsets, keySizes, valSizes) //3, determine the number of threads to run //4, calculate intermediate data keys'buf size // and values' buf size //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //6, allocate intermediate memory on device memory //7, start map //8, free allocated memory //-------------------------------------------------- int startMap(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);} if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); } if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); } if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);} //------------------------------------------------------- //1, get map input data on host //------------------------------------------------------- int h_inputRecordCount = g_spec->inputRecordCount; int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x + g_spec->inputOffsetSizes[h_inputRecordCount-1].y; int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z + g_spec->inputOffsetSizes[h_inputRecordCount-1].w; char* h_inputKeys = g_spec->inputKeys; char* h_inputVals = g_spec->inputVals; int4* h_inputOffsetSizes = g_spec->inputOffsetSizes; DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount); //------------------------------------------------------- //2, upload map input data onto device memory //------------------------------------------------------- DoLog( "** Upload map input data onto device memory"); TimeVal_t uploadTv; startTimer(&uploadTv); char* d_inputKeys = NULL; char* d_inputVals = NULL; int4* d_inputOffsetSizes = NULL; checkCudaErrors(hipMalloc((void**)&d_inputKeys, h_inputKeysBufSize)); checkCudaErrors(hipMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&d_inputVals, h_inputValsBufSize)); checkCudaErrors(hipMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount)); hipMemcpy(d_inputOffsetSizes, 
h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, hipMemcpyHostToDevice); endTimer("PCI-E I/O", &uploadTv); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockMap,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskMap; int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; TimeVal_t mapTimer; startTimer(&mapTimer); //---------------------------------------------- //4, calculate intermediate data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** MapCount"); int* d_interKeysSizePerTask = NULL; checkCudaErrors(hipMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads)); hipMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interValsSizePerTask = NULL; checkCudaErrors(hipMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads)); hipMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interCountPerTask = NULL; checkCudaErrors(hipMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads)); hipMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads); hipLaunchKernelGGL(( MapperCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_inputKeys, d_inputVals, d_inputOffsetSizes, d_interKeysSizePerTask, d_interValsSizePerTask, d_interCountPerTask, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //----------------------------------------------- //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on intermediate data's size\n"); int *d_psKeySizes = NULL; checkCudaErrors(hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads)); int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; checkCudaErrors(hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads)); int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; checkCudaErrors(hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads)); int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts); if (h_allCounts == 0) { DoLog( "** No output."); hipFree(d_inputKeys); hipFree(d_inputVals); hipFree(d_inputOffsetSizes); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); endTimer("Map", &mapTimer); return 1; } //----------------------------------------------- //6, allocate intermediate memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_interKeys = NULL; checkCudaErrors(hipMalloc((void**)&d_interKeys, h_allKeySize)); hipMemset(d_interKeys, 0, h_allKeySize); char* d_interVals = NULL; checkCudaErrors(hipMalloc((void**)&d_interVals, h_allValSize)); hipMemset(d_interVals, 0, h_allValSize); int4* d_interOffsetSizes = NULL; checkCudaErrors(hipMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts)); 
hipMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts); //-------------------------------------------------- //7, start map //-------------------------------------------------- DoLog( "** Map"); int2* d_keyValOffsets = NULL; checkCudaErrors(hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads)); hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; checkCudaErrors(hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads)); hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; hipLaunchKernelGGL(( Mapper), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_inputKeys, d_inputVals, d_inputOffsetSizes, d_psKeySizes, d_psValSizes, d_psCounts, d_keyValOffsets, d_interKeys, d_interVals, d_interOffsetSizes, d_curIndex, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); g_spec->interKeys = d_interKeys; g_spec->interVals = d_interVals; g_spec->interOffsetSizes = d_interOffsetSizes; g_spec->interRecordCount = h_allCounts; g_spec->interDiffKeyCount = h_allCounts; g_spec->interAllKeySize = h_allKeySize; g_spec->interAllValSize = h_allValSize; //---------------------------------------------- //8, free //---------------------------------------------- hipFree(d_interKeysSizePerTask); hipFree(d_interValsSizePerTask); hipFree(d_interCountPerTask); hipFree(d_keyValOffsets); hipFree(d_curIndex); hipFree(d_inputKeys); hipFree(d_inputVals); hipFree(d_inputOffsetSizes); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); endTimer("Map", &mapTimer); return 0; } void startGroup(Spec_t* spec) { Spec_t* g_spec = spec; int interDiffKeyCount = 0; char* d_outputKeys = NULL; char* d_outputVals = NULL; int4* d_outputOffsetSizes = NULL; int2** h_outputKeyListRange = NULL; DoLog( "** Sort for group"); checkCudaErrors(hipMalloc((void**)&d_outputKeys, g_spec->interAllKeySize)); checkCudaErrors(hipMalloc((void**)&d_outputVals, g_spec->interAllValSize)); checkCudaErrors(hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount)); h_outputKeyListRange = (int2**)malloc(sizeof(int2*)); saven_initialPrefixSum(g_spec->interRecordCount); interDiffKeyCount = sort_GPU (g_spec->interKeys, g_spec->interAllKeySize, g_spec->interVals, g_spec->interAllValSize, g_spec->interOffsetSizes, g_spec->interRecordCount, d_outputKeys, d_outputVals, d_outputOffsetSizes, h_outputKeyListRange); DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount); g_spec->interKeys = d_outputKeys; g_spec->interVals = d_outputVals; g_spec->interOffsetSizes = d_outputOffsetSizes; g_spec->interDiffKeyCount = interDiffKeyCount; int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2); checkCudaErrors(hipMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize)); checkCudaErrors(hipMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, hipMemcpyHostToDevice)); free(*h_outputKeyListRange); free(h_outputKeyListRange); } //-------------------------------------------------------- //get a value from value list of the same key // //param : vals //param : interOffsetSizes //param : index //return: the wanted value //-------------------------------------------------------- __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)vals + keyIndex * offset.w); } __device__ void *GetKey(void *key, int4* interOffsetSizes, int 
keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)key + keyIndex * offset.y); } //--------------------------------------------------------- //called by user defined reduce_count function //--------------------------------------------------------- __device__ void EmitCount(int keySize, int valSize, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask) { int index = TID; outputKeysSizePerTask[index] += keySize; outputValsSizePerTask[index] += valSize; outputCountPerTask[index]++; } //--------------------------------------------------------- //called by user defined reduce function //--------------------------------------------------------- __device__ void Emit (char* key, char* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x); char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y); for (int i = 0; i < keySize; i++) pKeySet[i] = key[i]; for (int i = 0; i < valSize; i++) pValSet[i] = val[i]; keyValOffsets[index].x += keySize; keyValOffsets[index].y += valSize; if (curIndex[index] != 0) { outputOffsetSizes[psCounts[index] + curIndex[index]].x = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y); outputOffsetSizes[psCounts[index] + curIndex[index]].z = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w); } outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize; outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize; curIndex[index]++; } //------------------------------------------------------- //calculate output data's size //------------------------------------------------------- __global__ void ReducerCount(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; //for (int i = 0; i <= recordsPerTask; i++) for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce_count(key, vals, keySize, valCount, interOffsetSizes, outputKeysSizePerTask, outputValsSizePerTask, outputCountPerTask); } } //------------------------------------------------------- //Reducer // //------------------------------------------------------- __global__ void Reducer(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* psKeySizes, int* psValSizes, int* psCounts, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int2* keyValOffsets, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int 
index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; outputOffsetSizes[psCounts[index]].x = psKeySizes[index]; outputOffsetSizes[psCounts[index]].z = psValSizes[index]; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce(key, vals, keySize, valCount, psKeySizes, psValSizes, psCounts, keyValOffsets, interOffsetSizes, outputKeys, outputVals, outputOffsetSizes, curIndex, valStartIndex); } } //---------------------------------------------- //start reduce // //1, if there is not a reduce phase, just return // then user uses spec->interKeys/spec->intervals // for further processing //2, get reduce input data on host //3, upload reduce input data onto device memory //4, determine the number of threads to run //5, calculate output data keys'buf size // and values' buf size //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //7, allocate output memory on device memory //8, start reduce //9, copy output data to Spect_t structure //10,free allocated memory //---------------------------------------------- void startReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);} if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);} if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);} if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);} if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);} if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);} //------------------------------------------------------- //2, get reduce input data on host //------------------------------------------------------- int h_interDiffKeyCount = g_spec->interDiffKeyCount; char* d_interKeys = g_spec->interKeys; char* d_interVals = g_spec->interVals; int4* d_interOffsetSizes = g_spec->interOffsetSizes; int2* d_interKeyListRange = g_spec->interKeyListRange; //---------------------------------------------- //4, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockReduce,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskReduce; int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; //---------------------------------------------- //5, calculate output data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** ReduceCount"); int* d_outputKeysSizePerTask = NULL; hipMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads); hipMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputValsSizePerTask = NULL; hipMalloc((void**)&d_outputValsSizePerTask, 
sizeof(int)*h_actualNumThreads); hipMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputCountPerTask = NULL; hipMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads); hipMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads); hipLaunchKernelGGL(( ReducerCount), dim3(h_dimGrid), dim3(h_dimBlock), 0, 0, d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_outputKeysSizePerTask, d_outputValsSizePerTask, d_outputCountPerTask, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //----------------------------------------------- //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on output data's size"); int *d_psKeySizes = NULL; hipMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads); hipMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads); int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; hipMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads); hipMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads); int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; hipMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads); hipMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads); int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts); //----------------------------------------------- //7, allocate output memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_outputKeys = NULL; hipMalloc((void**)&d_outputKeys, h_allKeySize); char* d_outputVals = NULL; hipMalloc((void**)&d_outputVals, h_allValSize); int4* d_outputOffsetSizes = NULL; hipMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts); //-------------------------------------------------- //8, start reduce //-------------------------------------------------- DoLog( "** Reduce"); int2* d_keyValOffsets = NULL; hipMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads); hipMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; hipMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads); hipMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; hipLaunchKernelGGL(( Reducer), dim3(h_dimGrid), dim3(h_dimBlock), sizeSmem, 0, d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_psKeySizes, d_psValSizes, d_psCounts, d_outputKeys, d_outputVals, d_outputOffsetSizes, d_keyValOffsets, d_curIndex, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); hipDeviceSynchronize(); //------------------------------------------------------- //9, copy output data to Spec_t structure //------------------------------------------------------- g_spec->outputKeys = d_outputKeys; g_spec->outputVals = d_outputVals; g_spec->outputOffsetSizes = d_outputOffsetSizes; g_spec->outputRecordCount = h_allCounts; g_spec->outputAllKeySize = h_allKeySize; g_spec->outputAllValSize = h_allValSize; //---------------------------------------------- //10, free allocated memory 
//---------------------------------------------- hipFree(d_interKeys); hipFree(d_interVals); hipFree(d_interOffsetSizes); hipFree(d_outputKeysSizePerTask); hipFree(d_outputValsSizePerTask); hipFree(d_outputCountPerTask); hipFree(d_psKeySizes); hipFree(d_psValSizes); hipFree(d_psCounts); hipFree(d_keyValOffsets); hipFree(d_curIndex); } //---------------------------------------------- //start main map reduce procedure //1, init device //2, start map //3, start reduce // //param : spec //---------------------------------------------- void MapReduce(Spec_t *spec) { assert(NULL != spec); Spec_t* g_spec = spec; DoLog( "=====start map/reduce====="); //------------------------------------------- //1, init device //------------------------------------------- //CUT_DEVICE_INIT(); DoLog( "** init GPU"); InitMapReduce(spec); //------------------------------------------- //2, start map //------------------------------------------- DoLog( "----------start map-----------"); if (startMap(spec)) { printf("** No output."); return; } if (g_spec->workflow == MAP_ONLY) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; goto EXIT_MR; } //------------------------------------------- //3, start group //------------------------------------------- DoLog( "----------start group-----------"); TimeVal_t groupTimer; startTimer(&groupTimer); startGroup(spec); endTimer("Group", &groupTimer); if (g_spec->workflow == MAP_GROUP) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount; if (g_spec->outputToHost == 1) { g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount); checkCudaErrors(hipMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(g_spec->interKeyListRange)); } goto EXIT_MR; } //------------------------------------------- //4, start reduce //------------------------------------------- DoLog( "----------start reduce--------"); TimeVal_t reduceTimer; startTimer(&reduceTimer); startReduce(spec); endTimer("Reduce", &reduceTimer); EXIT_MR: if (g_spec->outputToHost == 1) { int indexSize = g_spec->outputRecordCount * sizeof(int4); char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize); if (h_outputKeys == NULL) exit(0); char* h_outputVals = (char*)malloc(g_spec->outputAllValSize); if (h_outputVals == NULL) exit(0); int4* h_outputOffsetSizes = (int4*)malloc(indexSize); if (h_outputOffsetSizes == NULL) exit(0); checkCudaErrors(hipMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(g_spec->outputKeys)); checkCudaErrors(hipFree(g_spec->outputVals)); checkCudaErrors(hipFree(g_spec->outputOffsetSizes)); g_spec->outputKeys = h_outputKeys; g_spec->outputVals = h_outputVals; 
g_spec->outputOffsetSizes = h_outputOffsetSizes; } } //------------------------------------------ //the last step // //1, free global variables' memory //2, close log file's file pointer //------------------------------------------ void FinishMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; //------------------------------------------- //1, free global variables' memory //------------------------------------------- free(g_spec->inputKeys); free(g_spec->inputVals); free(g_spec->inputOffsetSizes); if (g_spec->outputToHost == 1) { free(g_spec->outputKeys); free(g_spec->outputVals); free(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) free(g_spec->outputKeyListRange); } else { hipFree(g_spec->outputKeys); hipFree(g_spec->outputVals); hipFree(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) hipFree(g_spec->outputKeyListRange); } free(g_spec); DoLog( "=====finish map/reduce====="); } #endif //__MRLIB_CU__
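// --- Sketch (not part of MarsLib) ----------------------------------------------------
// Most of the diff between the .hip file above and the original .cu below is the
// mechanical rewrite hipify applies: cuda* allocation/copy/free calls become their hip*
// counterparts, cudaThreadSynchronize() becomes hipDeviceSynchronize(), and the
// triple-chevron launch becomes hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes,
// stream, args...). toyKernel and the other names below are local to this sketch.
#include <hip/hip_runtime.h>

__global__ void toyKernel(int* out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = i;
}

int main() {
  const int n = 256;
  int* d_out = nullptr;
  hipMalloc(&d_out, n * sizeof(int));
  dim3 grid(1), block(256);
  int sharedMemBytes = 0;
  // CUDA form (as in the .cu file below):
  //   toyKernel<<<grid, block, sharedMemBytes>>>(d_out, n);
  //   cudaThreadSynchronize();
  // HIP form produced by hipify (as in the .hip file above):
  hipLaunchKernelGGL(toyKernel, grid, block, sharedMemBytes, 0, d_out, n);
  hipDeviceSynchronize();
  hipFree(d_out);
  return 0;
}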
00f1972302402e0d9cd1380bc3184df78c211437.cu
/*$Id: MarsLib.cu 755 2009-11-18 13:22:54Z wenbinor $*/ /** *This is the source code for Mars, a MapReduce framework on graphics *processors. *Developers: Wenbin Fang (HKUST), Bingsheng He (Microsoft Research Asia) *Naga K. Govindaraju (Microsoft Corp.), Qiong Luo (HKUST), Tuyong Wang (Sina.com). *If you have any question on the code, please contact us at * [email protected] or [email protected] * *The license is a free non-exclusive, non-transferable license to reproduce, *use, modify and display the source code version of the Software, with or *without modifications solely for non-commercial research, educational or *evaluation purposes. The license does not entitle Licensee to technical support, *telephone assistance, enhancements or updates to the Software. All rights, title *to and ownership interest in Mars, including all intellectual property rights *therein shall remain in HKUST. */ #ifndef __MRLIB_CU__ #define __MRLIB_CU__ #include "MarsInc.h" #include "map.h" #include "reduce.h" #include <helper_cuda.h> //---------------------------------------------- //Get default runtime configuration // //return: default spec //---------------------------------------------- Spec_t *GetDefaultSpec() { Spec_t *spec = (Spec_t*)malloc(sizeof(Spec_t)); if (NULL == spec) exit(-1); memset(spec, 0, sizeof(Spec_t)); return spec; } //-------------------------------------------------------- //Initiate map reduce spec //-------------------------------------------------------- void InitMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->dimBlockMap <= 0) g_spec->dimBlockMap = DEFAULT_DIMBLOCK; if (g_spec->dimBlockReduce <= 0) g_spec->dimBlockReduce = DEFAULT_DIMBLOCK; if (g_spec->numRecTaskReduce <= 0) g_spec->numRecTaskReduce = DEFAULT_NUMTASK; if (g_spec->numRecTaskMap <= 0) g_spec->numRecTaskMap = DEFAULT_NUMTASK; if (g_spec->workflow <= 0) g_spec->workflow = MAP_ONLY; } //-------------------------------------------------- //Add a map input record // //param : spec //param : key -- a pointer to a buffer //param : val -- a pointer to a buffer //param : keySize //param : valSize //-------------------------------------------------- void AddMapInputRecord(Spec_t* spec, void* key, void* val, int keySize, int valSize) { assert(NULL != spec); static int2 curOffset; static int3 curChunkNum; int index = spec->inputRecordCount; const int dataChunkSize = 1024*1024*256; if (spec->inputRecordCount > 0) { if (dataChunkSize*curChunkNum.x < (curOffset.x + keySize)) spec->inputKeys = (char*)realloc(spec->inputKeys, (++curChunkNum.x)*dataChunkSize); memcpy(spec->inputKeys+curOffset.x, key, keySize); if (dataChunkSize*curChunkNum.y < (curOffset.y + valSize)) spec->inputVals = (char*)realloc(spec->inputVals, (++curChunkNum.y)*dataChunkSize); memcpy(spec->inputVals+curOffset.y, val, valSize); if (dataChunkSize*curChunkNum.z < (spec->inputRecordCount+1)*sizeof(int4)) spec->inputOffsetSizes = (int4*)realloc(spec->inputOffsetSizes, (++curChunkNum.z)*dataChunkSize); } else { spec->inputKeys = (char*)malloc(dataChunkSize); if (NULL == spec->inputKeys) exit(-1); memcpy(spec->inputKeys, key, keySize); spec->inputVals = (char*)malloc(dataChunkSize); if (NULL == spec->inputVals) exit(-1); memcpy(spec->inputVals, val, valSize); spec->inputOffsetSizes = (int4*)malloc(dataChunkSize); curChunkNum.x++; curChunkNum.y++; curChunkNum.z++; } spec->inputOffsetSizes[index].x = curOffset.x; spec->inputOffsetSizes[index].y = keySize; spec->inputOffsetSizes[index].z = curOffset.y; spec->inputOffsetSizes[index].w = valSize; curOffset.x += 
keySize; curOffset.y += valSize; spec->inputRecordCount++; } //------------------------------------------------- //Called by user defined map_count function // //param : keySize //param : valSize //param : interKeysSizePerTask //param : interValsSizePerTask //param : interCountPerTask //------------------------------------------------- __device__ void EmitInterCount(int keySize, int valSize, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask) { int index = TID; interKeysSizePerTask[index] += keySize; interValsSizePerTask[index] += valSize; interCountPerTask[index]++; } //------------------------------------------------- //called by user defined map function // //------------------------------------------------- __device__ void EmitIntermediate(void* key, void* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; int2 l_keyValOffsets = keyValOffsets[index]; char *pKeySet = (char*)(interKeys + psKeySizes[index] + l_keyValOffsets.x); char *pValSet = (char*)(interVals + psValSizes[index] + l_keyValOffsets.y); char* sKey = (char*)key; char* sVal = (char*)val; for (int i = 0; i < keySize; ++i) pKeySet[i] = sKey[i]; for (int i = 0; i < valSize; ++i) pValSet[i] = sVal[i]; l_keyValOffsets.x += keySize; l_keyValOffsets.y += valSize; keyValOffsets[index] = l_keyValOffsets; int l_curIndex = curIndex[index]; int l_psCounts = psCounts[index]; int l_curPs = l_curIndex + l_psCounts; int4 l_interOffsetSizes1 = interOffsetSizes[l_curPs]; int4 l_interOffsetSizes2 = interOffsetSizes[l_curPs-1]; if (l_curIndex != 0) { l_interOffsetSizes1.x = (l_interOffsetSizes2.x + l_interOffsetSizes2.y); l_interOffsetSizes1.z = (l_interOffsetSizes2.z + l_interOffsetSizes2.w); } l_interOffsetSizes1.y = keySize; l_interOffsetSizes1.w = valSize; interOffsetSizes[l_curPs] = l_interOffsetSizes1; ++l_curIndex; curIndex[index] = l_curIndex; } //------------------------------------------------- //Calculate intermediate data's size // //param : inputKeys //param : inputVals //param : inputOffsetSizes //param : interKeysSizesPerTask //param : interValsSizePerTask //param : interCountPerTask //param : recordNum -- total number of records //param : recordsPerTask //------------------------------------------------- __global__ void MapperCount(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* interKeysSizePerTask, int* interValsSizePerTask, int* interCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map_count(key, val, offsetSize.y, offsetSize.w, interKeysSizePerTask, interValsSizePerTask, interCountPerTask); } } //-------------------------------------------------- //mapper //-------------------------------------------------- __global__ void Mapper(char* inputKeys, char* inputVals, int4* inputOffsetSizes, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* interKeys, char* interVals, int4* interOffsetSizes, int* 
curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; int l_psCounts = psCounts[index]; int4 l_interOffsetSizes = interOffsetSizes[l_psCounts]; l_interOffsetSizes.x = psKeySizes[index]; l_interOffsetSizes.z = psValSizes[index]; interOffsetSizes[l_psCounts] = l_interOffsetSizes; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int4 offsetSize = inputOffsetSizes[cindex]; char *key = inputKeys + offsetSize.x; char *val = inputVals + offsetSize.z; map(key, val, offsetSize.y, offsetSize.w, psKeySizes, psValSizes, psCounts, keyValOffsets, interKeys, interVals, interOffsetSizes, curIndex); } } //-------------------------------------------------- //start map // //1, get map input data on host //2, upload map input data to device memory // (keys, vals, keyOffsets, valOffsets, keySizes, valSizes) //3, determine the number of threads to run //4, calculate intermediate data keys'buf size // and values' buf size //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //6, allocate intermediate memory on device memory //7, start map //8, free allocated memory //-------------------------------------------------- int startMap(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->inputKeys == NULL) { DoLog("Error: no any input keys"); exit(0);} if (g_spec->inputVals == NULL) { DoLog("Error: no any input values"); exit(0); } if (g_spec->inputOffsetSizes == NULL) { DoLog( "Error: no any input pointer info"); exit(0); } if (g_spec->inputRecordCount == 0) {DoLog( "Error: invalid input record count"); exit(0);} //------------------------------------------------------- //1, get map input data on host //------------------------------------------------------- int h_inputRecordCount = g_spec->inputRecordCount; int h_inputKeysBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].x + g_spec->inputOffsetSizes[h_inputRecordCount-1].y; int h_inputValsBufSize = g_spec->inputOffsetSizes[h_inputRecordCount-1].z + g_spec->inputOffsetSizes[h_inputRecordCount-1].w; char* h_inputKeys = g_spec->inputKeys; char* h_inputVals = g_spec->inputVals; int4* h_inputOffsetSizes = g_spec->inputOffsetSizes; DoLog( "** Map Input: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_inputKeysBufSize, h_inputValsBufSize, sizeof(int4)*h_inputRecordCount, h_inputRecordCount); //------------------------------------------------------- //2, upload map input data onto device memory //------------------------------------------------------- DoLog( "** Upload map input data onto device memory"); TimeVal_t uploadTv; startTimer(&uploadTv); char* d_inputKeys = NULL; char* d_inputVals = NULL; int4* d_inputOffsetSizes = NULL; checkCudaErrors(cudaMalloc((void**)&d_inputKeys, h_inputKeysBufSize)); checkCudaErrors(cudaMemcpy(d_inputKeys, h_inputKeys, h_inputKeysBufSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**)&d_inputVals, h_inputValsBufSize)); checkCudaErrors(cudaMemcpy(d_inputVals, h_inputVals, h_inputValsBufSize, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**)&d_inputOffsetSizes, sizeof(int4)*h_inputRecordCount)); cudaMemcpy(d_inputOffsetSizes, h_inputOffsetSizes, sizeof(int4)*h_inputRecordCount, cudaMemcpyHostToDevice); endTimer("PCI-E 
I/O", &uploadTv); //---------------------------------------------- //3, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockMap,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskMap; int numBlocks = CEIL(CEIL(h_inputRecordCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; TimeVal_t mapTimer; startTimer(&mapTimer); //---------------------------------------------- //4, calculate intermediate data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** MapCount"); int* d_interKeysSizePerTask = NULL; checkCudaErrors(cudaMalloc((void**)&d_interKeysSizePerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interValsSizePerTask = NULL; checkCudaErrors(cudaMalloc((void**)&d_interValsSizePerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_interCountPerTask = NULL; checkCudaErrors(cudaMalloc((void**)&d_interCountPerTask, sizeof(int)*h_actualNumThreads)); cudaMemset(d_interCountPerTask, 0, sizeof(int)*h_actualNumThreads); MapperCount<<<h_dimGrid, h_dimBlock>>>(d_inputKeys, d_inputVals, d_inputOffsetSizes, d_interKeysSizePerTask, d_interValsSizePerTask, d_interCountPerTask, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //----------------------------------------------- //5, do prefix sum on-- // i) d_interKeysSizePerTask // ii) d_interValsSizePerTask // iii) d_interCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on intermediate data's size\n"); int *d_psKeySizes = NULL; checkCudaErrors(cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads)); int h_allKeySize = prefexSum((int*)d_interKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; checkCudaErrors(cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads)); int h_allValSize = prefexSum((int*)d_interValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; checkCudaErrors(cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads)); int h_allCounts = prefexSum((int*)d_interCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog( "** Map Output: keys buf size %d bytes, vals buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts * sizeof(int4), h_allCounts); if (h_allCounts == 0) { DoLog( "** No output."); cudaFree(d_inputKeys); cudaFree(d_inputVals); cudaFree(d_inputOffsetSizes); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); endTimer("Map", &mapTimer); return 1; } //----------------------------------------------- //6, allocate intermediate memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_interKeys = NULL; checkCudaErrors(cudaMalloc((void**)&d_interKeys, h_allKeySize)); cudaMemset(d_interKeys, 0, h_allKeySize); char* d_interVals = NULL; checkCudaErrors(cudaMalloc((void**)&d_interVals, h_allValSize)); cudaMemset(d_interVals, 0, h_allValSize); int4* d_interOffsetSizes = NULL; checkCudaErrors(cudaMalloc((void**)&d_interOffsetSizes, sizeof(int4)*h_allCounts)); cudaMemset(d_interOffsetSizes, 0, sizeof(int4)*h_allCounts); //-------------------------------------------------- 
//7, start map //-------------------------------------------------- DoLog( "** Map"); int2* d_keyValOffsets = NULL; checkCudaErrors(cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads)); cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; checkCudaErrors(cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads)); cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; Mapper<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_inputKeys, d_inputVals, d_inputOffsetSizes, d_psKeySizes, d_psValSizes, d_psCounts, d_keyValOffsets, d_interKeys, d_interVals, d_interOffsetSizes, d_curIndex, h_inputRecordCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); g_spec->interKeys = d_interKeys; g_spec->interVals = d_interVals; g_spec->interOffsetSizes = d_interOffsetSizes; g_spec->interRecordCount = h_allCounts; g_spec->interDiffKeyCount = h_allCounts; g_spec->interAllKeySize = h_allKeySize; g_spec->interAllValSize = h_allValSize; //---------------------------------------------- //8, free //---------------------------------------------- cudaFree(d_interKeysSizePerTask); cudaFree(d_interValsSizePerTask); cudaFree(d_interCountPerTask); cudaFree(d_keyValOffsets); cudaFree(d_curIndex); cudaFree(d_inputKeys); cudaFree(d_inputVals); cudaFree(d_inputOffsetSizes); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); endTimer("Map", &mapTimer); return 0; } void startGroup(Spec_t* spec) { Spec_t* g_spec = spec; int interDiffKeyCount = 0; char* d_outputKeys = NULL; char* d_outputVals = NULL; int4* d_outputOffsetSizes = NULL; int2** h_outputKeyListRange = NULL; DoLog( "** Sort for group"); checkCudaErrors(cudaMalloc((void**)&d_outputKeys, g_spec->interAllKeySize)); checkCudaErrors(cudaMalloc((void**)&d_outputVals, g_spec->interAllValSize)); checkCudaErrors(cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*g_spec->interRecordCount)); h_outputKeyListRange = (int2**)malloc(sizeof(int2*)); saven_initialPrefixSum(g_spec->interRecordCount); interDiffKeyCount = sort_GPU (g_spec->interKeys, g_spec->interAllKeySize, g_spec->interVals, g_spec->interAllValSize, g_spec->interOffsetSizes, g_spec->interRecordCount, d_outputKeys, d_outputVals, d_outputOffsetSizes, h_outputKeyListRange); DoLog( "** InterRecordCount:%d, number of groups: %d", g_spec->interRecordCount, interDiffKeyCount); g_spec->interKeys = d_outputKeys; g_spec->interVals = d_outputVals; g_spec->interOffsetSizes = d_outputOffsetSizes; g_spec->interDiffKeyCount = interDiffKeyCount; int keyListRangeSize = g_spec->interDiffKeyCount * sizeof(int2); checkCudaErrors(cudaMalloc((void**)&g_spec->interKeyListRange, keyListRangeSize)); checkCudaErrors(cudaMemcpy(g_spec->interKeyListRange, *h_outputKeyListRange, keyListRangeSize, cudaMemcpyHostToDevice)); free(*h_outputKeyListRange); free(h_outputKeyListRange); } //-------------------------------------------------------- //get a value from value list of the same key // //param : vals //param : interOffsetSizes //param : index //return: the wanted value //-------------------------------------------------------- __device__ void *GetVal(void *vals, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)vals + keyIndex * offset.w); } __device__ void *GetKey(void *key, int4* interOffsetSizes, int keyIndex, int valStartIndex) { int4 offset = interOffsetSizes[valStartIndex]; return (void*)((char*)key + keyIndex * offset.y); } 
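//--------------------------------------------------------
// Hedged sketch (illustration only, not part of MRLib): walking one key's
// value list with GetVal. ReducerCount/Reducer below hand a user-defined
// reduce function the pointer to the group's first value plus valCount and
// valStartIndex taken from interKeyListRange. Assuming every value in the
// group is a plain int of equal size, a reduce body could total them as shown
// here; the function name and the int payload are illustrative assumptions.
//--------------------------------------------------------
__device__ int SumIntValueListSketch(void* vals, int4* interOffsetSizes,
                                     int valCount, int valStartIndex)
{
	int total = 0;
	for (int i = 0; i < valCount; i++)
	{
		//GetVal steps through the list in units of the group's value size (offset.w)
		int* pVal = (int*)GetVal(vals, interOffsetSizes, i, valStartIndex);
		total += *pVal;
	}
	return total;
}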
//--------------------------------------------------------- //called by user defined reduce_count function //--------------------------------------------------------- __device__ void EmitCount(int keySize, int valSize, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask) { int index = TID; outputKeysSizePerTask[index] += keySize; outputValsSizePerTask[index] += valSize; outputCountPerTask[index]++; } //--------------------------------------------------------- //called by user defined reduce function //--------------------------------------------------------- __device__ void Emit (char* key, char* val, int keySize, int valSize, int* psKeySizes, int* psValSizes, int* psCounts, int2* keyValOffsets, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int* curIndex) { #ifndef __DEVICE_EMULATION__ __syncthreads(); #endif int index = TID; char *pKeySet = (char*)(outputKeys + psKeySizes[index] + keyValOffsets[index].x); char *pValSet = (char*)(outputVals + psValSizes[index] + keyValOffsets[index].y); for (int i = 0; i < keySize; i++) pKeySet[i] = key[i]; for (int i = 0; i < valSize; i++) pValSet[i] = val[i]; keyValOffsets[index].x += keySize; keyValOffsets[index].y += valSize; if (curIndex[index] != 0) { outputOffsetSizes[psCounts[index] + curIndex[index]].x = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].x + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].y); outputOffsetSizes[psCounts[index] + curIndex[index]].z = (outputOffsetSizes[psCounts[index] + curIndex[index] - 1].z + outputOffsetSizes[psCounts[index] + curIndex[index] - 1].w); } outputOffsetSizes[psCounts[index] + curIndex[index]].y = keySize; outputOffsetSizes[psCounts[index] + curIndex[index]].w = valSize; curIndex[index]++; } //------------------------------------------------------- //calculate output data's size //------------------------------------------------------- __global__ void ReducerCount(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* outputKeysSizePerTask, int* outputValsSizePerTask, int* outputCountPerTask, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; //for (int i = 0; i <= recordsPerTask; i++) for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce_count(key, vals, keySize, valCount, interOffsetSizes, outputKeysSizePerTask, outputValsSizePerTask, outputCountPerTask); } } //------------------------------------------------------- //Reducer // //------------------------------------------------------- __global__ void Reducer(char* interKeys, char* interVals, int4* interOffsetSizes, int2* interKeyListRange, int* psKeySizes, int* psValSizes, int* psCounts, char* outputKeys, char* outputVals, int4* outputOffsetSizes, int2* keyValOffsets, int* curIndex, int recordNum, int recordsPerTask, int taskNum) { int index = TID; int bid = BLOCK_ID; int tid = THREAD_ID; if (index*recordsPerTask >= recordNum) return; int recordBase = bid * 
recordsPerTask * blockDim.x; int terminate = (bid + 1) * (recordsPerTask * blockDim.x); if (terminate > recordNum) terminate = recordNum; outputOffsetSizes[psCounts[index]].x = psKeySizes[index]; outputOffsetSizes[psCounts[index]].z = psValSizes[index]; for (int i = recordBase + tid; i < terminate; i+=blockDim.x) { int cindex = i; int valStartIndex = interKeyListRange[cindex].x; int valCount = interKeyListRange[cindex].y - interKeyListRange[cindex].x; int keySize = interOffsetSizes[interKeyListRange[cindex].x].y; char *key = interKeys + interOffsetSizes[valStartIndex].x; char *vals = interVals + interOffsetSizes[valStartIndex].z; reduce(key, vals, keySize, valCount, psKeySizes, psValSizes, psCounts, keyValOffsets, interOffsetSizes, outputKeys, outputVals, outputOffsetSizes, curIndex, valStartIndex); } } //---------------------------------------------- //start reduce // //1, if there is not a reduce phase, just return // then user uses spec->interKeys/spec->intervals // for further processing //2, get reduce input data on host //3, upload reduce input data onto device memory //4, determine the number of threads to run //5, calculate output data keys'buf size // and values' buf size //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //7, allocate output memory on device memory //8, start reduce //9, copy output data to Spect_t structure //10,free allocated memory //---------------------------------------------- void startReduce(Spec_t* spec) { Spec_t* g_spec = spec; if (g_spec->interKeys == NULL) {DoLog( "Error: no any intermediate keys"); exit(0);} if (g_spec->interVals == NULL) {DoLog( "Error: no any intermediate values"); exit(0);} if (g_spec->interOffsetSizes == NULL) {DoLog( "Error: no any intermediate pointer info");exit(0);} if (g_spec->interRecordCount == 0) {DoLog( "Error: invalid intermediate record count");exit(0);} if (g_spec->interKeyListRange == NULL) { DoLog( "Error: no any key list range");exit(0);} if (g_spec->interDiffKeyCount == 0) { DoLog( "Error: invalid intermediate diff key count");exit(0);} //------------------------------------------------------- //2, get reduce input data on host //------------------------------------------------------- int h_interDiffKeyCount = g_spec->interDiffKeyCount; char* d_interKeys = g_spec->interKeys; char* d_interVals = g_spec->interVals; int4* d_interOffsetSizes = g_spec->interOffsetSizes; int2* d_interKeyListRange = g_spec->interKeyListRange; //---------------------------------------------- //4, determine the number of threads to run //---------------------------------------------- dim3 h_dimBlock(g_spec->dimBlockReduce,1,1); dim3 h_dimGrid(1,1,1); int h_recordsPerTask = g_spec->numRecTaskReduce; int numBlocks = CEIL(CEIL(h_interDiffKeyCount, h_recordsPerTask), h_dimBlock.x); THREAD_CONF(h_dimGrid, h_dimBlock, numBlocks, h_dimBlock.x); int h_actualNumThreads = h_dimGrid.x*h_dimBlock.x*h_dimGrid.y; //---------------------------------------------- //5, calculate output data keys'buf size // and values' buf size //---------------------------------------------- DoLog( "** ReduceCount"); int* d_outputKeysSizePerTask = NULL; cudaMalloc((void**)&d_outputKeysSizePerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputKeysSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputValsSizePerTask = NULL; cudaMalloc((void**)&d_outputValsSizePerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputValsSizePerTask, 0, sizeof(int)*h_actualNumThreads); int* d_outputCountPerTask 
= NULL; cudaMalloc((void**)&d_outputCountPerTask, sizeof(int)*h_actualNumThreads); cudaMemset(d_outputCountPerTask, 0, sizeof(int)*h_actualNumThreads); ReducerCount<<<h_dimGrid, h_dimBlock>>>(d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_outputKeysSizePerTask, d_outputValsSizePerTask, d_outputCountPerTask, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //----------------------------------------------- //6, do prefix sum on-- // i) d_outputKeysSizePerTask // ii) d_outputValsSizePerTask // iii) d_outputCountPerTask //----------------------------------------------- DoLog( "** Do prefix sum on output data's size"); int *d_psKeySizes = NULL; cudaMalloc((void**)&d_psKeySizes, sizeof(int)*h_actualNumThreads); cudaMemset(d_psKeySizes, 0, sizeof(int)*h_actualNumThreads); int h_allKeySize = prefexSum((int*)d_outputKeysSizePerTask, (int*)d_psKeySizes, h_actualNumThreads); int *d_psValSizes = NULL; cudaMalloc((void**)&d_psValSizes, sizeof(int)*h_actualNumThreads); cudaMemset(d_psValSizes, 0, sizeof(int)*h_actualNumThreads); int h_allValSize = prefexSum((int*)d_outputValsSizePerTask, (int*)d_psValSizes, h_actualNumThreads); int *d_psCounts = NULL; cudaMalloc((void**)&d_psCounts, sizeof(int)*h_actualNumThreads); cudaMemset(d_psCounts, 0, sizeof(int)*h_actualNumThreads); int h_allCounts = prefexSum((int*)d_outputCountPerTask, (int*)d_psCounts, h_actualNumThreads); DoLog("** Reduce Output: key buf size %d bytes, val buf size %d bytes, index buf size %d bytes, %d records", h_allKeySize, h_allValSize, h_allCounts*sizeof(int4),h_allCounts); //----------------------------------------------- //7, allocate output memory on device memory //----------------------------------------------- DoLog( "** Allocate intermediate memory on device memory"); char* d_outputKeys = NULL; cudaMalloc((void**)&d_outputKeys, h_allKeySize); char* d_outputVals = NULL; cudaMalloc((void**)&d_outputVals, h_allValSize); int4* d_outputOffsetSizes = NULL; cudaMalloc((void**)&d_outputOffsetSizes, sizeof(int4)*h_allCounts); //-------------------------------------------------- //8, start reduce //-------------------------------------------------- DoLog( "** Reduce"); int2* d_keyValOffsets = NULL; cudaMalloc((void**)&d_keyValOffsets, sizeof(int2)*h_actualNumThreads); cudaMemset(d_keyValOffsets, 0, sizeof(int2)*h_actualNumThreads); int* d_curIndex = NULL; cudaMalloc((void**)&d_curIndex, sizeof(int)*h_actualNumThreads); cudaMemset(d_curIndex, 0, sizeof(int)*h_actualNumThreads); int sizeSmem = h_dimBlock.x * sizeof(int) * 5; Reducer<<<h_dimGrid, h_dimBlock, sizeSmem>>>(d_interKeys, d_interVals, d_interOffsetSizes, d_interKeyListRange, d_psKeySizes, d_psValSizes, d_psCounts, d_outputKeys, d_outputVals, d_outputOffsetSizes, d_keyValOffsets, d_curIndex, h_interDiffKeyCount, h_recordsPerTask, h_actualNumThreads); cudaThreadSynchronize(); //------------------------------------------------------- //9, copy output data to Spec_t structure //------------------------------------------------------- g_spec->outputKeys = d_outputKeys; g_spec->outputVals = d_outputVals; g_spec->outputOffsetSizes = d_outputOffsetSizes; g_spec->outputRecordCount = h_allCounts; g_spec->outputAllKeySize = h_allKeySize; g_spec->outputAllValSize = h_allValSize; //---------------------------------------------- //10, free allocated memory //---------------------------------------------- cudaFree(d_interKeys); cudaFree(d_interVals); cudaFree(d_interOffsetSizes); cudaFree(d_outputKeysSizePerTask); 
cudaFree(d_outputValsSizePerTask); cudaFree(d_outputCountPerTask); cudaFree(d_psKeySizes); cudaFree(d_psValSizes); cudaFree(d_psCounts); cudaFree(d_keyValOffsets); cudaFree(d_curIndex); } //---------------------------------------------- //start main map reduce procedure //1, init device //2, start map //3, start reduce // //param : spec //---------------------------------------------- void MapReduce(Spec_t *spec) { assert(NULL != spec); Spec_t* g_spec = spec; DoLog( "=====start map/reduce====="); //------------------------------------------- //1, init device //------------------------------------------- //CUT_DEVICE_INIT(); DoLog( "** init GPU"); InitMapReduce(spec); //------------------------------------------- //2, start map //------------------------------------------- DoLog( "----------start map-----------"); if (startMap(spec)) { printf("** No output."); return; } if (g_spec->workflow == MAP_ONLY) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; goto EXIT_MR; } //------------------------------------------- //3, start group //------------------------------------------- DoLog( "----------start group-----------"); TimeVal_t groupTimer; startTimer(&groupTimer); startGroup(spec); endTimer("Group", &groupTimer); if (g_spec->workflow == MAP_GROUP) { g_spec->outputKeys = g_spec->interKeys; g_spec->outputVals = g_spec->interVals; g_spec->outputOffsetSizes = g_spec->interOffsetSizes; g_spec->outputRecordCount = g_spec->interRecordCount; g_spec->outputAllKeySize = g_spec->interAllKeySize; g_spec->outputAllValSize = g_spec->interAllValSize; g_spec->outputDiffKeyCount = g_spec->interDiffKeyCount; if (g_spec->outputToHost == 1) { g_spec->outputKeyListRange = (int2*)malloc(sizeof(int2)*g_spec->outputDiffKeyCount); checkCudaErrors(cudaMemcpy(g_spec->outputKeyListRange, g_spec->interKeyListRange, sizeof(int2)*g_spec->outputDiffKeyCount, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(g_spec->interKeyListRange)); } goto EXIT_MR; } //------------------------------------------- //4, start reduce //------------------------------------------- DoLog( "----------start reduce--------"); TimeVal_t reduceTimer; startTimer(&reduceTimer); startReduce(spec); endTimer("Reduce", &reduceTimer); EXIT_MR: if (g_spec->outputToHost == 1) { int indexSize = g_spec->outputRecordCount * sizeof(int4); char* h_outputKeys = (char*)malloc(g_spec->outputAllKeySize); if (h_outputKeys == NULL) exit(0); char* h_outputVals = (char*)malloc(g_spec->outputAllValSize); if (h_outputVals == NULL) exit(0); int4* h_outputOffsetSizes = (int4*)malloc(indexSize); if (h_outputOffsetSizes == NULL) exit(0); checkCudaErrors(cudaMemcpy(h_outputKeys, g_spec->outputKeys, g_spec->outputAllKeySize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_outputVals, g_spec->outputVals, g_spec->outputAllValSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_outputOffsetSizes, g_spec->outputOffsetSizes, indexSize, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(g_spec->outputKeys)); checkCudaErrors(cudaFree(g_spec->outputVals)); checkCudaErrors(cudaFree(g_spec->outputOffsetSizes)); g_spec->outputKeys = h_outputKeys; g_spec->outputVals = h_outputVals; g_spec->outputOffsetSizes = h_outputOffsetSizes; } } //------------------------------------------ //the last step // //1, free global variables' memory 
//2, close log file's file pointer //------------------------------------------ void FinishMapReduce(Spec_t* spec) { Spec_t* g_spec = spec; //------------------------------------------- //1, free global variables' memory //------------------------------------------- free(g_spec->inputKeys); free(g_spec->inputVals); free(g_spec->inputOffsetSizes); if (g_spec->outputToHost == 1) { free(g_spec->outputKeys); free(g_spec->outputVals); free(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) free(g_spec->outputKeyListRange); } else { cudaFree(g_spec->outputKeys); cudaFree(g_spec->outputVals); cudaFree(g_spec->outputOffsetSizes); if (g_spec->workflow == MAP_GROUP) cudaFree(g_spec->outputKeyListRange); } free(g_spec); DoLog( "=====finish map/reduce====="); } #endif //__MRLIB_CU__
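//--------------------------------------------------------
// Hedged sketch (illustration only, not part of MRLib): driving the library
// from the host. The record contents, the block/task sizes and the helper name
// are assumptions; the only facts taken from the code above are the Spec_t
// fields it reads (inputKeys, inputVals, inputOffsetSizes, inputRecordCount,
// dimBlockMap, numRecTaskMap, dimBlockReduce, numRecTaskReduce, workflow,
// outputToHost) and the int4 record layout (x = key offset, y = key size,
// z = value offset, w = value size). FinishMapReduce() frees the input buffers
// and the Spec_t itself, so both must come from malloc().
//--------------------------------------------------------
static void RunMapOnlySketch(char* flatKeys, char* flatVals,
                             int4* offsetSizes, int recordCount)
{
	Spec_t* spec = (Spec_t*)malloc(sizeof(Spec_t));
	memset(spec, 0, sizeof(Spec_t));

	spec->inputKeys        = flatKeys;      //malloc'd host buffer, freed by FinishMapReduce
	spec->inputVals        = flatVals;      //malloc'd host buffer
	spec->inputOffsetSizes = offsetSizes;   //malloc'd host buffer
	spec->inputRecordCount = recordCount;

	spec->dimBlockMap      = 256;           //threads per block for the map phase (illustrative)
	spec->numRecTaskMap    = 1;             //records per map task (illustrative)
	spec->dimBlockReduce   = 256;
	spec->numRecTaskReduce = 1;

	spec->workflow     = MAP_ONLY;          //skip group/reduce for this sketch
	spec->outputToHost = 1;                 //copy results back to host memory

	MapReduce(spec);

	//With outputToHost == 1, spec->outputKeys/outputVals/outputOffsetSizes now
	//point at host memory holding spec->outputRecordCount records.

	FinishMapReduce(spec);                  //frees inputs, host outputs and spec
}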
41c761009cc52b61eca25f2b2abe38885d24366e.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define SORT_BY_MATERIAL 0 #define CACHE_FIRST_INTERSECTION 0 #define STREAM_COMPACTION 0 #define DOF false #define MOTION_BLUR 1 #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; static ShadeableIntersection * dev_first_intersections = NULL; bool cache_first_intersection = false; // TODO: static variables for device memory, any extra info you need, etc // ... 
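// --- Hedged sketch (illustration only): expected host-side lifecycle ---------
// The entry points defined in this file are assumed to be declared in
// pathtrace.h (included above) and driven by the application's preview loop,
// which is not part of this file. A minimal driver could look like this; the
// function name, `scene`, `pbo` and the iteration count are assumptions.
static void runPathTraceLoopSketch(Scene* scene, uchar4* pbo, int totalIterations) {
	pathtraceInit(scene);                  // allocate per-scene device buffers
	for (int iter = 1; iter <= totalIterations; ++iter) {
		pathtrace(pbo, 0 /*frame*/, iter); // one sample per pixel, accumulated into dev_image
	}
	pathtraceFree();                       // release device buffers
}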
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need cache_first_intersection = false; hipMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created hipFree(dev_first_intersections); checkCUDAError("pathtraceFree"); } //Reference: PBRT source code //ConcentricSampleDisk __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) { float r, theta; float a, b; // Map uniform random numbers to $[-1,1]^2$ float sx = 2 * u1 - 1; float sy = 2 * u2 - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { // Handle first region of disk r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { // Handle second region of disk r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { // Handle third region of disk r = -sx; theta = 4.0f - sy / r; } else { // Handle fourth region of disk r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; a = r * cosf(theta); b = r * sinf(theta); glm::vec2 returnValue(a, b); return returnValue; } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ //__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; thrust::default_random_engine rng = makeSeededRandomEngine(iter, (x + (y * cam.resolution.x)), 0); thrust::uniform_real_distribution<float> u01(0, 1); if (x < cam.resolution.x && y < cam.resolution.y) { //Calculate index int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // Depth of field // pbrt page 313 - 318 // The range of distances from the lens at which objects appear in focus is called the len's depth of field // TODO: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + u01(rng)) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + u01(rng)) - (float)cam.resolution.y * 0.5f) ); //segment.ray.direction = glm::normalize(cam.view // - cam.right * cam.pixelLength.x * ((float)(x) - (float)cam.resolution.x * 0.5f) // - cam.up * cam.pixelLength.y * ((float)(y) - (float)cam.resolution.y * 0.5f) // ); if (DOF == true) { glm::vec2 returnValue = ConcentricSampleDisk(u01(rng), u01(rng)); returnValue.x *= cam.lensRadius; returnValue.y *= cam.lensRadius; //pbrt page 318 float ft = glm::abs(cam.focalDistance / cam.view.z); glm::vec3 Pfocus = segment.ray.direction * ft + segment.ray.origin; segment.ray.origin += returnValue.x * cam.right + returnValue.y * cam.up; segment.ray.direction = glm::normalize(Pfocus - segment.ray.origin); } segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; if (pathSegment.remainingBounces < 0) { return; } float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; intersections[path_index].materialId = -1; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].position = intersect_point; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces < 0) { return; } // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = -1; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { scatterRay(pathSegments[idx], intersection.position, intersection.surfaceNormal, material, rng); if (pathSegments[idx].remainingBounces < 0) { pathSegments[idx].color = glm::vec3(0.f); } //float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); //pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; //pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. 
} else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = -1; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths && iterationPaths[index].remainingBounces < 0) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } //thrust document struct isPathAlive { __host__ __device__ bool operator()(const PathSegment& path_segment) { return path_segment.remainingBounces >= 0; } }; //Sort by material struct MaterialComparator { __host__ __device__ bool operator() (const ShadeableIntersection& a, const ShadeableIntersection& b) { return a.materialId < b.materialId; } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *hstSceneGeoms = &(hst_scene->geoms)[0]; Geom *motionBlurGeoms = &(hst_scene->geoms)[0]; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing //toggle between motion blur and non-motion-blur #if MOTION_BLUR thrust::default_random_engine rng = makeSeededRandomEngine(iter, hst_scene->geoms.size(), traceDepth); thrust::uniform_real_distribution<float> u02PI(0, TWO_PI); for (int i = 0; i < hst_scene->geoms.size(); i++) { motionBlurGeoms[i] = hstSceneGeoms[i]; motionBlurGeoms[i].translation.x += hstSceneGeoms[i].motion.x * 0.08 * cos(u02PI(rng)); motionBlurGeoms[i].translation.y += hstSceneGeoms[i].motion.y * 0.08 * cos(u02PI(rng)); motionBlurGeoms[i].translation.z += hstSceneGeoms[i].motion.z * 0.08 * cos(u02PI(rng)); //keep on updating all its transform information motionBlurGeoms[i].transform = utilityCore::buildTransformationMatrix(motionBlurGeoms[i].translation, motionBlurGeoms[i].rotation, motionBlurGeoms[i].scale); motionBlurGeoms[i].invTranspose = glm::inverseTranspose(motionBlurGeoms[i].transform); motionBlurGeoms[i].inverseTransform = glm::inverse(motionBlurGeoms[i].transform); } hipMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); #else hipMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); #endif // generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; thrust::device_ptr<PathSegment> thrust_dev_paths(dev_paths); PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int num_paths_alive = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; //*******************************************************// //******************big while begin**********************// while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths_alive + blockSize1d - 1) / blockSize1d; if (depth == 0 && cache_first_intersection) { hipMemcpy(dev_intersections, dev_first_intersections, num_paths * sizeof(dev_first_intersections[0]), hipMemcpyDeviceToDevice); } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths_alive , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); } checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; //Toggle between cache first intersection #if CACHE_FIRST_INTERSECTION if (!cache_first_intersection) { cache_first_intersection = true; hipMemcpy(dev_first_intersections, dev_intersections, num_paths * sizeof(dev_first_intersections[0]), hipMemcpyDeviceToDevice); } #endif //Toggle sort by material. WHY SO SLOW......(not using it) #if SORT_BY_MATERIAL //Sort by material thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths_alive, dev_paths, MaterialComparator()); #endif // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths_alive, dev_intersections, dev_paths, dev_materials ); //Toggle stream compaction //Google group reminder: using thrust::partition #if STREAM_COMPACTION num_paths_alive = thrust::partition(thrust::device, thrust_dev_paths, thrust_dev_paths + num_paths_alive, isPathAlive()) - thrust_dev_paths; iterationComplete = (num_paths_alive <= 0); // TODO: should be based off stream compaction results. #else num_paths_alive = num_paths; iterationComplete = (num_paths_alive <= 0) || depth > traceDepth; #endif } //*******************************************************// //*****************big while end*************************// // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
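// --- Hedged sketch (illustration only): a minimal diffuse scatter ------------
// The shading stage above calls scatterRay() from interactions.h, which is not
// shown in this file. The sketch below is NOT that implementation; it is a
// diffuse-only stand-in with hypothetical names, matching the call signature
// used in shadeFakeMaterial, to illustrate what the bounce step does: pick a
// cosine-weighted direction about the normal, offset the origin to avoid
// self-intersection, attenuate the throughput by the albedo, and spend a bounce.
__device__ glm::vec3 cosineSampleHemisphereSketch(glm::vec3 normal,
	thrust::default_random_engine &rng) {
	thrust::uniform_real_distribution<float> u01(0, 1);
	float up = sqrtf(u01(rng));            // cos(theta), cosine-weighted
	float over = sqrtf(1.0f - up * up);    // sin(theta)
	float around = u01(rng) * TWO_PI;
	// Build an orthonormal basis around the normal (pick a non-parallel axis).
	glm::vec3 notNormal;
	if (glm::abs(normal.x) < 0.577f)      notNormal = glm::vec3(1, 0, 0);
	else if (glm::abs(normal.y) < 0.577f) notNormal = glm::vec3(0, 1, 0);
	else                                  notNormal = glm::vec3(0, 0, 1);
	glm::vec3 perp1 = glm::normalize(glm::cross(normal, notNormal));
	glm::vec3 perp2 = glm::normalize(glm::cross(normal, perp1));
	return up * normal + cosf(around) * over * perp1 + sinf(around) * over * perp2;
}

__device__ void scatterRayDiffuseSketch(PathSegment & path,
	glm::vec3 intersect, glm::vec3 normal, const Material &m,
	thrust::default_random_engine &rng) {
	path.ray.direction = glm::normalize(cosineSampleHemisphereSketch(normal, rng));
	path.ray.origin = intersect + 0.001f * normal;  // nudge off the surface
	path.color *= m.color;                          // Lambertian albedo; cosine term cancels with the pdf
	path.remainingBounces--;
}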
41c761009cc52b61eca25f2b2abe38885d24366e.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_ptr.h> #include <thrust/sort.h> #include <glm/gtc/matrix_inverse.hpp> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define SORT_BY_MATERIAL 0 #define CACHE_FIRST_INTERSECTION 0 #define STREAM_COMPACTION 0 #define DOF false #define MOTION_BLUR 1 #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int)(pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int)(pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int)(pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; static ShadeableIntersection * dev_first_intersections = NULL; bool cache_first_intersection = false; // TODO: static variables for device memory, any extra info you need, etc // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need cache_first_intersection = false; cudaMalloc(&dev_first_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_first_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created cudaFree(dev_first_intersections); checkCUDAError("pathtraceFree"); } //Reference: PBRT source code //ConcentricSampleDisk __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) { float r, theta; float a, b; // Map uniform random numbers to $[-1,1]^2$ float sx = 2 * u1 - 1; float sy = 2 * u2 - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { // Handle first region of disk r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { // Handle second region of disk r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { // Handle third region of disk r = -sx; theta = 4.0f - sy / r; } else { // Handle fourth region of disk r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; a = r * cosf(theta); b = r * sinf(theta); glm::vec2 returnValue(a, b); return returnValue; } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ //__global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; thrust::default_random_engine rng = makeSeededRandomEngine(iter, (x + (y * cam.resolution.x)), 0); thrust::uniform_real_distribution<float> u01(0, 1); if (x < cam.resolution.x && y < cam.resolution.y) { //Calculate index int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); // Depth of field // pbrt page 313 - 318 // The range of distances from the lens at which objects appear in focus is called the len's depth of field // TODO: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + u01(rng)) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + u01(rng)) - (float)cam.resolution.y * 0.5f) ); //segment.ray.direction = glm::normalize(cam.view // - cam.right * cam.pixelLength.x * ((float)(x) - (float)cam.resolution.x * 0.5f) // - cam.up * cam.pixelLength.y * ((float)(y) - (float)cam.resolution.y * 0.5f) // ); if (DOF == true) { glm::vec2 returnValue = ConcentricSampleDisk(u01(rng), u01(rng)); returnValue.x *= cam.lensRadius; returnValue.y *= cam.lensRadius; //pbrt page 318 float ft = glm::abs(cam.focalDistance / cam.view.z); glm::vec3 Pfocus = segment.ray.direction * ft + segment.ray.origin; segment.ray.origin += returnValue.x * cam.right + returnValue.y * cam.up; segment.ray.direction = glm::normalize(Pfocus - segment.ray.origin); } segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. __global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; if (pathSegment.remainingBounces < 0) { return; } float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. 
if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; intersections[path_index].materialId = -1; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].position = intersect_point; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { if (pathSegments[idx].remainingBounces < 0) { return; } // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, pathSegments[idx].remainingBounces); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); pathSegments[idx].remainingBounces = -1; } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { scatterRay(pathSegments[idx], intersection.position, intersection.surfaceNormal, material, rng); if (pathSegments[idx].remainingBounces < 0) { pathSegments[idx].color = glm::vec3(0.f); } //float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); //pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; //pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. 
} else { pathSegments[idx].color = glm::vec3(0.0f); pathSegments[idx].remainingBounces = -1; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths && iterationPaths[index].remainingBounces < 0) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } //thrust document struct isPathAlive { __host__ __device__ bool operator()(const PathSegment& path_segment) { return path_segment.remainingBounces >= 0; } }; //Sort by material struct MaterialComparator { __host__ __device__ bool operator() (const ShadeableIntersection& a, const ShadeableIntersection& b) { return a.materialId < b.materialId; } }; /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; const Geom *hstSceneGeoms = &(hst_scene->geoms)[0]; Geom *motionBlurGeoms = &(hst_scene->geoms)[0]; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing //toggle between motion blur and non-motion-blur #if MOTION_BLUR thrust::default_random_engine rng = makeSeededRandomEngine(iter, hst_scene->geoms.size(), traceDepth); thrust::uniform_real_distribution<float> u02PI(0, TWO_PI); for (int i = 0; i < hst_scene->geoms.size(); i++) { motionBlurGeoms[i] = hstSceneGeoms[i]; motionBlurGeoms[i].translation.x += hstSceneGeoms[i].motion.x * 0.08 * cos(u02PI(rng)); motionBlurGeoms[i].translation.y += hstSceneGeoms[i].motion.y * 0.08 * cos(u02PI(rng)); motionBlurGeoms[i].translation.z += hstSceneGeoms[i].motion.z * 0.08 * cos(u02PI(rng)); //keep on updating all its transform information motionBlurGeoms[i].transform = utilityCore::buildTransformationMatrix(motionBlurGeoms[i].translation, motionBlurGeoms[i].rotation, motionBlurGeoms[i].scale); motionBlurGeoms[i].invTranspose = glm::inverseTranspose(motionBlurGeoms[i].transform); motionBlurGeoms[i].inverseTransform = glm::inverse(motionBlurGeoms[i].transform); } cudaMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); #else cudaMemcpy(dev_geoms, motionBlurGeoms, hst_scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); #endif // generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths); generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); int depth = 0; thrust::device_ptr<PathSegment> thrust_dev_paths(dev_paths); PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int num_paths_alive = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; //*******************************************************// //******************big while begin**********************// while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths_alive + blockSize1d - 1) / blockSize1d; if (depth == 0 && cache_first_intersection) { cudaMemcpy(dev_intersections, dev_first_intersections, num_paths * sizeof(dev_first_intersections[0]), cudaMemcpyDeviceToDevice); } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths_alive , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections ); } checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; //Toggle between cache first intersection #if CACHE_FIRST_INTERSECTION if (!cache_first_intersection) { cache_first_intersection = true; cudaMemcpy(dev_first_intersections, dev_intersections, num_paths * sizeof(dev_first_intersections[0]), cudaMemcpyDeviceToDevice); } #endif //Toggle sort by material. WHY SO SLOW......(not using it) #if SORT_BY_MATERIAL //Sort by material thrust::sort_by_key(thrust::device, dev_intersections, dev_intersections + num_paths_alive, dev_paths, MaterialComparator()); #endif // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
shadeFakeMaterial << <numblocksPathSegmentTracing, blockSize1d >> > ( iter, num_paths_alive, dev_intersections, dev_paths, dev_materials ); //Toggle stream compaction //Google group reminder: using thrust::partition #if STREAM_COMPACTION num_paths_alive = thrust::partition(thrust::device, thrust_dev_paths, thrust_dev_paths + num_paths_alive, isPathAlive()) - thrust_dev_paths; iterationComplete = (num_paths_alive <= 0); // TODO: should be based off stream compaction results. #else num_paths_alive = num_paths; iterationComplete = (num_paths_alive <= 0) || depth > traceDepth; #endif } //*******************************************************// //*****************big while end*************************// // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
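// --- Hedged sketch (illustration only): compaction with thrust::remove_if ----
// The loop above keeps terminated paths in the buffer by using thrust::partition.
// The recap comment also mentions thrust::remove_if as an alternative; a sketch
// of that variant is below (isPathDead and compactTerminatedPathsSketch are
// hypothetical names). Caveat: unlike partition, remove_if leaves the removed
// (terminated) segments in an unspecified state, so their colors must already
// have been accumulated into the image, or gathered separately, before compacting.
struct isPathDead {
	__host__ __device__ bool operator()(const PathSegment& path_segment) {
		return path_segment.remainingBounces < 0;
	}
};

static int compactTerminatedPathsSketch(PathSegment* dev_paths_begin, int num_alive) {
	PathSegment* new_end = thrust::remove_if(thrust::device,
		dev_paths_begin, dev_paths_begin + num_alive, isPathDead());
	return (int)(new_end - dev_paths_begin);
}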
80369eade15ecf1ac7dd43437c41a28e28eace5d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/detail/binaryop.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/unary.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/unary.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> namespace cudf { namespace detail { namespace { // anonymous namespace template <typename _TargetT> struct unary_cast { template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_numeric<SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return static_cast<TargetT>(element); } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { // Convert source tick counts into target tick counts without blindly truncating them // by dividing the respective duration time periods (which may not work for time before // UNIX epoch) return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT>(element)}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_duration<TargetT>()>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{static_cast<typename TargetT::rep>(element)}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<cudf::is_duration<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return static_cast<TargetT>(element.count()); } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)}; } }; template <typename _SourceT, typename _TargetT> struct fixed_point_unary_cast { numeric::scale_type scale; using FixedPointT = 
std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, _TargetT>; using DeviceT = device_storage_type_t<FixedPointT>; template <typename SourceT = _SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element) { auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}}; return static_cast<TargetT>(fp); } template <typename SourceT = _SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element) { return TargetT{element, scale}.value(); } }; template <typename From, typename To> constexpr inline auto is_supported_non_fixed_point_cast() { return cudf::is_fixed_width<To>() && // Disallow fixed_point here (requires different specialization) !(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) && // Disallow conversions between timestamps and numeric !(cudf::is_timestamp<From>() && is_numeric<To>()) && !(cudf::is_timestamp<To>() && is_numeric<From>()); } template <typename From, typename To> constexpr inline auto is_supported_fixed_point_cast() { return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) || (cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) || (cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>()); } template <typename From, typename To> constexpr inline auto is_supported_cast() { return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>(); } template <typename From, typename To> struct device_cast { __device__ To operator()(From element) { return static_cast<To>(element); } }; /** * @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new * @p scale * * @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`) * @param input Input `column_view` * @param scale `scale` of the returned `column` * @param mr Device memory resource used to allocate the returned column's device memory * @param stream CUDA stream used for device memory operations and kernel launches * * @return std::unique_ptr<column> Returned column with new @p scale */ template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> rescale(column_view input, numeric::scale_type scale, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using namespace numeric; if (input.type().scale() > scale) { auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale}); auto const type = cudf::data_type{cudf::type_to_id<T>(), scale}; return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr); } else { auto const diff = input.type().scale() - scale; auto const scalar = make_fixed_point_scalar<T>(::pow(10, -diff), scale_type{diff}); auto const type = cudf::data_type{cudf::type_to_id<T>(), scale}; return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr); } }; template <typename _SourceT> struct dispatch_unary_cast_to { column_view input; dispatch_unary_cast_to(column_view inp) : input(inp) {} template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = 
input.size(); auto output = std::make_unique<column>(type, size, rmm::device_buffer{size * cudf::size_of(type), stream, mr}, detail::copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; thrust::transform(rmm::exec_policy(stream), input.begin<SourceT>(), input.end<SourceT>(), output_mutable.begin<TargetT>(), unary_cast<TargetT>{}); return output; } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = input.size(); auto output = std::make_unique<column>(type, size, rmm::device_buffer{size * cudf::size_of(type), stream, mr}, copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; using DeviceT = device_storage_type_t<SourceT>; auto const scale = numeric::scale_type{input.type().scale()}; thrust::transform(rmm::exec_policy(stream), input.begin<DeviceT>(), input.end<DeviceT>(), output_mutable.begin<TargetT>(), fixed_point_unary_cast<SourceT, TargetT>{scale}); return output; } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_fixed_point<TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = input.size(); auto output = std::make_unique<column>(type, size, rmm::device_buffer{size * cudf::size_of(type), stream, mr}, copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; using DeviceT = device_storage_type_t<TargetT>; auto const scale = numeric::scale_type{type.scale()}; thrust::transform(rmm::exec_policy(stream), input.begin<SourceT>(), input.end<SourceT>(), output_mutable.begin<DeviceT>(), fixed_point_unary_cast<SourceT, TargetT>{scale}); return output; } template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() && std::is_same<SourceT, TargetT>::value>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr); } template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() && not std::is_same<SourceT, TargetT>::value>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using namespace numeric; auto const size = input.size(); auto temporary = std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()}, size, rmm::device_buffer{size * cudf::size_of(type), stream}, copy_bitmask(input, stream), input.null_count()); using SourceDeviceT = device_storage_type_t<SourceT>; using TargetDeviceT = device_storage_type_t<TargetT>; mutable_column_view output_mutable = *temporary; thrust::transform(rmm::exec_policy(stream), input.begin<SourceDeviceT>(), input.end<SourceDeviceT>(), output_mutable.begin<TargetDeviceT>(), device_cast<SourceDeviceT, TargetDeviceT>{}); // clearly there is a more efficient way to do this, can optimize in the future 
return rescale<TargetT>(*temporary, numeric::scale_type{type.scale()}, stream, mr); } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (!cudf::is_fixed_width<TargetT>()) CUDF_FAIL("Column type must be numeric or chrono or decimal32/64"); else if (cudf::is_fixed_point<SourceT>()) CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported"); else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>()) CUDF_FAIL("Timestamps can be created only from duration"); else CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration"); } }; struct dispatch_unary_cast_from { column_view input; dispatch_unary_cast_from(column_view inp) : input(inp) {} template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr); } template <typename T, typename std::enable_if_t<!cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("Column type must be numeric or chrono or decimal32/64"); } }; } // anonymous namespace std::unique_ptr<column> cast(column_view const& input, data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width."); return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr); } } // namespace detail std::unique_ptr<column> cast(column_view const& input, data_type type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::cast(input, type, rmm::cuda_stream_default, mr); } } // namespace cudf
80369eade15ecf1ac7dd43437c41a28e28eace5d.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/detail/binaryop.hpp> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/unary.hpp> #include <cudf/fixed_point/fixed_point.hpp> #include <cudf/null_mask.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <cudf/unary.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> namespace cudf { namespace detail { namespace { // anonymous namespace template <typename _TargetT> struct unary_cast { template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_numeric<SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return static_cast<TargetT>(element); } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { // Convert source tick counts into target tick counts without blindly truncating them // by dividing the respective duration time periods (which may not work for time before // UNIX epoch) return TargetT{cuda::std::chrono::floor<TargetT::duration>(element.time_since_epoch())}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT>(element)}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_duration<TargetT>()>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{static_cast<typename TargetT::rep>(element)}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_timestamp<SourceT>() && cudf::is_duration<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT>(element.time_since_epoch())}; } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<cudf::is_duration<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return static_cast<TargetT>(element.count()); } template <typename SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_duration<SourceT>() && cudf::is_timestamp<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(SourceT const element) { return TargetT{cuda::std::chrono::floor<TargetT::duration>(element)}; } }; template <typename _SourceT, typename _TargetT> struct fixed_point_unary_cast { numeric::scale_type scale; using FixedPointT = std::conditional_t<cudf::is_fixed_point<_SourceT>(), _SourceT, 
_TargetT>; using DeviceT = device_storage_type_t<FixedPointT>; template <typename SourceT = _SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE TargetT operator()(DeviceT const element) { auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}}; return static_cast<TargetT>(fp); } template <typename SourceT = _SourceT, typename TargetT = _TargetT, typename std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point<TargetT>())>* = nullptr> CUDA_DEVICE_CALLABLE DeviceT operator()(SourceT const element) { return TargetT{element, scale}.value(); } }; template <typename From, typename To> constexpr inline auto is_supported_non_fixed_point_cast() { return cudf::is_fixed_width<To>() && // Disallow fixed_point here (requires different specialization) !(cudf::is_fixed_point<From>() || cudf::is_fixed_point<To>()) && // Disallow conversions between timestamps and numeric !(cudf::is_timestamp<From>() && is_numeric<To>()) && !(cudf::is_timestamp<To>() && is_numeric<From>()); } template <typename From, typename To> constexpr inline auto is_supported_fixed_point_cast() { return (cudf::is_fixed_point<From>() && cudf::is_numeric<To>()) || (cudf::is_numeric<From>() && cudf::is_fixed_point<To>()) || (cudf::is_fixed_point<From>() && cudf::is_fixed_point<To>()); } template <typename From, typename To> constexpr inline auto is_supported_cast() { return is_supported_non_fixed_point_cast<From, To>() || is_supported_fixed_point_cast<From, To>(); } template <typename From, typename To> struct device_cast { __device__ To operator()(From element) { return static_cast<To>(element); } }; /** * @brief Takes a `fixed_point` column_view as @p input and returns a `fixed_point` column with new * @p scale * * @tparam T Type of the `fixed_point` column_view (`decimal32` or `decimal64`) * @param input Input `column_view` * @param scale `scale` of the returned `column` * @param mr Device memory resource used to allocate the returned column's device memory * @param stream CUDA stream used for device memory operations and kernel launches * * @return std::unique_ptr<column> Returned column with new @p scale */ template <typename T, typename std::enable_if_t<is_fixed_point<T>()>* = nullptr> std::unique_ptr<column> rescale(column_view input, numeric::scale_type scale, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using namespace numeric; if (input.type().scale() > scale) { auto const scalar = make_fixed_point_scalar<T>(0, scale_type{scale}); auto const type = cudf::data_type{cudf::type_to_id<T>(), scale}; return detail::binary_operation(input, *scalar, binary_operator::ADD, type, stream, mr); } else { auto const diff = input.type().scale() - scale; auto const scalar = make_fixed_point_scalar<T>(std::pow(10, -diff), scale_type{diff}); auto const type = cudf::data_type{cudf::type_to_id<T>(), scale}; return detail::binary_operation(input, *scalar, binary_operator::DIV, type, stream, mr); } }; template <typename _SourceT> struct dispatch_unary_cast_to { column_view input; dispatch_unary_cast_to(column_view inp) : input(inp) {} template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<is_supported_non_fixed_point_cast<SourceT, TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = input.size(); auto output = std::make_unique<column>(type, size, 
rmm::device_buffer{size * cudf::size_of(type), stream, mr}, detail::copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; thrust::transform(rmm::exec_policy(stream), input.begin<SourceT>(), input.end<SourceT>(), output_mutable.begin<TargetT>(), unary_cast<TargetT>{}); return output; } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_numeric<TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = input.size(); auto output = std::make_unique<column>(type, size, rmm::device_buffer{size * cudf::size_of(type), stream, mr}, copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; using DeviceT = device_storage_type_t<SourceT>; auto const scale = numeric::scale_type{input.type().scale()}; thrust::transform(rmm::exec_policy(stream), input.begin<DeviceT>(), input.end<DeviceT>(), output_mutable.begin<TargetT>(), fixed_point_unary_cast<SourceT, TargetT>{scale}); return output; } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_numeric<SourceT>() && cudf::is_fixed_point<TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const size = input.size(); auto output = std::make_unique<column>(type, size, rmm::device_buffer{size * cudf::size_of(type), stream, mr}, copy_bitmask(input, stream, mr), input.null_count()); mutable_column_view output_mutable = *output; using DeviceT = device_storage_type_t<TargetT>; auto const scale = numeric::scale_type{type.scale()}; thrust::transform(rmm::exec_policy(stream), input.begin<SourceT>(), input.end<SourceT>(), output_mutable.begin<DeviceT>(), fixed_point_unary_cast<SourceT, TargetT>{scale}); return output; } template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() && std::is_same<SourceT, TargetT>::value>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (input.type() == type) return std::make_unique<column>(input); // TODO add test for this return detail::rescale<TargetT>(input, numeric::scale_type{type.scale()}, stream, mr); } template < typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<cudf::is_fixed_point<SourceT>() && cudf::is_fixed_point<TargetT>() && not std::is_same<SourceT, TargetT>::value>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { using namespace numeric; auto const size = input.size(); auto temporary = std::make_unique<column>(cudf::data_type{type.id(), input.type().scale()}, size, rmm::device_buffer{size * cudf::size_of(type), stream}, copy_bitmask(input, stream), input.null_count()); using SourceDeviceT = device_storage_type_t<SourceT>; using TargetDeviceT = device_storage_type_t<TargetT>; mutable_column_view output_mutable = *temporary; thrust::transform(rmm::exec_policy(stream), input.begin<SourceDeviceT>(), input.end<SourceDeviceT>(), output_mutable.begin<TargetDeviceT>(), device_cast<SourceDeviceT, TargetDeviceT>{}); // clearly there is a more efficient way to do this, can optimize in the future return rescale<TargetT>(*temporary, 
numeric::scale_type{type.scale()}, stream, mr); } template <typename TargetT, typename SourceT = _SourceT, typename std::enable_if_t<not is_supported_cast<SourceT, TargetT>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (!cudf::is_fixed_width<TargetT>()) CUDF_FAIL("Column type must be numeric or chrono or decimal32/64"); else if (cudf::is_fixed_point<SourceT>()) CUDF_FAIL("Currently only decimal32/64 to floating point/integral is supported"); else if (cudf::is_timestamp<SourceT>() && is_numeric<TargetT>()) CUDF_FAIL("Timestamps can be created only from duration"); else CUDF_FAIL("Timestamps cannot be converted to numeric without converting it to a duration"); } }; struct dispatch_unary_cast_from { column_view input; dispatch_unary_cast_from(column_view inp) : input(inp) {} template <typename T, typename std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return type_dispatcher(type, dispatch_unary_cast_to<T>{input}, type, stream, mr); } template <typename T, typename std::enable_if_t<!cudf::is_fixed_width<T>()>* = nullptr> std::unique_ptr<column> operator()(data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_FAIL("Column type must be numeric or chrono or decimal32/64"); } }; } // anonymous namespace std::unique_ptr<column> cast(column_view const& input, data_type type, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(is_fixed_width(type), "Unary cast type must be fixed-width."); return type_dispatcher(input.type(), detail::dispatch_unary_cast_from{input}, type, stream, mr); } } // namespace detail std::unique_ptr<column> cast(column_view const& input, data_type type, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::cast(input, type, rmm::cuda_stream_default, mr); } } // namespace cudf
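The dispatcher machinery in the two cast files above is reached through the public `cudf::cast` entry point declared in `cudf/unary.hpp`. A minimal host-side usage sketch follows; the wrapper function name `to_double` is illustrative only and not part of the file above, and the default memory resource is used for the returned column.

// Sketch: cast an arbitrary fixed-width column to FLOAT64 via the public API defined above.
#include <memory>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/unary.hpp>

std::unique_ptr<cudf::column> to_double(cudf::column_view const& input)
{
  // The target data_type selects the TargetT branch of dispatch_unary_cast_to.
  return cudf::cast(input, cudf::data_type{cudf::type_id::FLOAT64});
}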
2053dfe3111d593ce6eb024880849f932dc1eb32.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Simple demonstration of hipcub::BlockScan * * Example compilation string: * * nvcc example_block_scan_sum.cu -gencode=arch=compute_20,code=\"sm_20,compute_20\" -o example_block_scan_sum * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <iostream> #include <hipcub/hipcub.hpp> using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; int g_iterations = 100; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide exclusive prefix sum over integers */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void BlockPrefixSumKernel( int *d_in, // Tile of input int *d_out, // Tile of output clock_t *d_elapsed) // Elapsed cycle count of block scan { // Parameterize BlockScan type for our thread block typedef BlockScan<int, BLOCK_THREADS> BlockScanT; // Shared memory __shared__ typename BlockScanT::SmemStorage smem_storage; // Per-thread tile data int data[ITEMS_PER_THREAD]; BlockLoadVectorized(d_in, data); // Start cycle timer clock_t start = clock(); // Compute exclusive prefix sum int aggregate; BlockScanT::ExclusiveSum(smem_storage, data, data, aggregate); // Stop cycle timer clock_t stop = clock(); // Store output BlockStoreVectorized(d_out, 
data); // Store aggregate and elapsed clocks if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? start - stop : stop - start; d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate; } } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize exclusive prefix sum problem (and solution). * Returns the aggregate */ int Initialize( int *h_in, int *h_reference, int num_elements) { int inclusive = 0; for (int i = 0; i < num_elements; ++i) { h_in[i] = i % 17; h_reference[i] = inclusive; inclusive += h_in[i]; } return inclusive; } /** * Test thread block scan */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> void Test() { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Allocate host arrays int *h_in = new int[TILE_SIZE]; int *h_reference = new int[TILE_SIZE]; int *h_gpu = new int[TILE_SIZE + 1]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; clock_t *d_elapsed = NULL; hipMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); hipMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1)); hipMalloc((void**)&d_elapsed, sizeof(clock_t)); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < TILE_SIZE; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Copy problem to device hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice); printf("BlockScan %d items (%d threads, %d items per thread): ", TILE_SIZE, BLOCK_THREADS, ITEMS_PER_THREAD); // Run this several times and average the performance results clock_t elapsed_scan_clocks = 0; for (int i = 0; i < g_iterations; ++i) { // Run aggregate/prefix kernel hipLaunchKernelGGL(( BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD>), dim3(1), dim3(BLOCK_THREADS), 0, 0, d_in, d_out, d_elapsed); // Copy results from device clock_t scan_clocks; hipMemcpy(h_gpu, d_out, sizeof(int) * (TILE_SIZE + 1), hipMemcpyDeviceToHost); hipMemcpy(&scan_clocks, d_elapsed, sizeof(clock_t), hipMemcpyDeviceToHost); elapsed_scan_clocks += scan_clocks; } // Check scanned items bool correct = true; for (int i = 0; i < TILE_SIZE; i++) { if (h_gpu[i] != h_reference[i]) { printf("Incorrect result @ offset %d (%d != %d)\n", i, h_gpu[i], h_reference[i]); correct = false; break; } } // Check total aggregate if (h_gpu[TILE_SIZE] != h_aggregate) { printf("Incorrect aggregate (%d != %d)\n", h_gpu[TILE_SIZE], h_aggregate); correct = false; } if (correct) printf("Correct!\n"); // Display results problem data if (g_verbose) { printf("GPU output (reference output): "); for (int i = 0; i < TILE_SIZE; i++) printf("%d (%d), ", h_gpu[i], h_reference[i]); printf("\n"); printf("GPU aggregate (reference aggregate)", h_gpu[TILE_SIZE], h_aggregate); printf("\n\n"); } // Display timing results printf("Average clocks per 32-bit int scanned: %.3f\n\n", float(elapsed_scan_clocks) / TILE_SIZE / g_iterations); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_gpu) delete[] h_gpu; if (d_in) hipFree(d_in); if (d_out) hipFree(d_out); if (d_elapsed) hipFree(d_elapsed); } /** * Main */ int main(int argc, char** argv) { // Display GPU name hipDeviceProp_t props; hipGetDeviceProperties(&props, 0); printf("Using device %s\n", props.name); /** Add tests here **/ // Run tests Test<1024, 1>(); Test<512, 2>(); Test<256, 4>(); Test<128, 8>(); Test<64, 16>(); Test<32, 32>(); Test<16, 64>(); 
/****/ return 0; }
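The hipified block-scan example above launches its kernel and issues hipMemcpy calls without inspecting the returned error codes, even though it defines CUB_STDERR. A small checking macro of the following shape is a common addition; this is a sketch, not part of the original example.

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

// Abort with a readable message when a HIP runtime call fails.
#define HIP_CHECK(call)                                                   \
  do {                                                                    \
    hipError_t hip_check_err_ = (call);                                   \
    if (hip_check_err_ != hipSuccess) {                                   \
      fprintf(stderr, "HIP error '%s' at %s:%d\n",                        \
              hipGetErrorString(hip_check_err_), __FILE__, __LINE__);     \
      exit(EXIT_FAILURE);                                                 \
    }                                                                     \
  } while (0)

// Example: HIP_CHECK(hipMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, hipMemcpyHostToDevice));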
2053dfe3111d593ce6eb024880849f932dc1eb32.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Simple demonstration of cub::BlockScan * * Example compilation string: * * nvcc example_block_scan_sum.cu -gencode=arch=compute_20,code=\"sm_20,compute_20\" -o example_block_scan_sum * ******************************************************************************/ // Ensure printing of CUDA runtime errors to console (define before including cub.h) #define CUB_STDERR #include <stdio.h> #include <iostream> #include <cub/cub.cuh> using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- bool g_verbose = false; int g_iterations = 100; //--------------------------------------------------------------------- // Kernels //--------------------------------------------------------------------- /** * Simple kernel for performing a block-wide exclusive prefix sum over integers */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void BlockPrefixSumKernel( int *d_in, // Tile of input int *d_out, // Tile of output clock_t *d_elapsed) // Elapsed cycle count of block scan { // Parameterize BlockScan type for our thread block typedef BlockScan<int, BLOCK_THREADS> BlockScanT; // Shared memory __shared__ typename BlockScanT::SmemStorage smem_storage; // Per-thread tile data int data[ITEMS_PER_THREAD]; BlockLoadVectorized(d_in, data); // Start cycle timer clock_t start = clock(); // Compute exclusive prefix sum int aggregate; BlockScanT::ExclusiveSum(smem_storage, data, data, aggregate); // Stop cycle timer clock_t stop = clock(); // Store output BlockStoreVectorized(d_out, data); // Store aggregate and elapsed clocks if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? 
start - stop : stop - start; d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate; } } //--------------------------------------------------------------------- // Host utilities //--------------------------------------------------------------------- /** * Initialize exclusive prefix sum problem (and solution). * Returns the aggregate */ int Initialize( int *h_in, int *h_reference, int num_elements) { int inclusive = 0; for (int i = 0; i < num_elements; ++i) { h_in[i] = i % 17; h_reference[i] = inclusive; inclusive += h_in[i]; } return inclusive; } /** * Test thread block scan */ template < int BLOCK_THREADS, int ITEMS_PER_THREAD> void Test() { const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD; // Allocate host arrays int *h_in = new int[TILE_SIZE]; int *h_reference = new int[TILE_SIZE]; int *h_gpu = new int[TILE_SIZE + 1]; // Initialize problem and reference output on host int h_aggregate = Initialize(h_in, h_reference, TILE_SIZE); // Initialize device arrays int *d_in = NULL; int *d_out = NULL; clock_t *d_elapsed = NULL; cudaMalloc((void**)&d_in, sizeof(int) * TILE_SIZE); cudaMalloc((void**)&d_out, sizeof(int) * (TILE_SIZE + 1)); cudaMalloc((void**)&d_elapsed, sizeof(clock_t)); // Display input problem data if (g_verbose) { printf("Input data: "); for (int i = 0; i < TILE_SIZE; i++) printf("%d, ", h_in[i]); printf("\n\n"); } // Copy problem to device cudaMemcpy(d_in, h_in, sizeof(int) * TILE_SIZE, cudaMemcpyHostToDevice); printf("BlockScan %d items (%d threads, %d items per thread): ", TILE_SIZE, BLOCK_THREADS, ITEMS_PER_THREAD); // Run this several times and average the performance results clock_t elapsed_scan_clocks = 0; for (int i = 0; i < g_iterations; ++i) { // Run aggregate/prefix kernel BlockPrefixSumKernel<BLOCK_THREADS, ITEMS_PER_THREAD><<<1, BLOCK_THREADS>>>( d_in, d_out, d_elapsed); // Copy results from device clock_t scan_clocks; cudaMemcpy(h_gpu, d_out, sizeof(int) * (TILE_SIZE + 1), cudaMemcpyDeviceToHost); cudaMemcpy(&scan_clocks, d_elapsed, sizeof(clock_t), cudaMemcpyDeviceToHost); elapsed_scan_clocks += scan_clocks; } // Check scanned items bool correct = true; for (int i = 0; i < TILE_SIZE; i++) { if (h_gpu[i] != h_reference[i]) { printf("Incorrect result @ offset %d (%d != %d)\n", i, h_gpu[i], h_reference[i]); correct = false; break; } } // Check total aggregate if (h_gpu[TILE_SIZE] != h_aggregate) { printf("Incorrect aggregate (%d != %d)\n", h_gpu[TILE_SIZE], h_aggregate); correct = false; } if (correct) printf("Correct!\n"); // Display results problem data if (g_verbose) { printf("GPU output (reference output): "); for (int i = 0; i < TILE_SIZE; i++) printf("%d (%d), ", h_gpu[i], h_reference[i]); printf("\n"); printf("GPU aggregate (reference aggregate)", h_gpu[TILE_SIZE], h_aggregate); printf("\n\n"); } // Display timing results printf("Average clocks per 32-bit int scanned: %.3f\n\n", float(elapsed_scan_clocks) / TILE_SIZE / g_iterations); // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_gpu) delete[] h_gpu; if (d_in) cudaFree(d_in); if (d_out) cudaFree(d_out); if (d_elapsed) cudaFree(d_elapsed); } /** * Main */ int main(int argc, char** argv) { // Display GPU name cudaDeviceProp props; cudaGetDeviceProperties(&props, 0); printf("Using device %s\n", props.name); /** Add tests here **/ // Run tests Test<1024, 1>(); Test<512, 2>(); Test<256, 4>(); Test<128, 8>(); Test<64, 16>(); Test<32, 32>(); Test<16, 64>(); /****/ return 0; }
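Both versions of this example target the pre-1.0 CUB interface (SmemStorage and the free-standing BlockLoadVectorized/BlockStoreVectorized helpers), which no longer exists in current CUB releases. As a point of comparison, a minimal sketch of the same block-wide exclusive prefix sum written against the current cub::BlockScan interface is shown below; it is an assumed port for illustration, not part of the original file.

#include <cub/cub.cuh>

template <int BLOCK_THREADS, int ITEMS_PER_THREAD>
__global__ void BlockPrefixSumKernelCurrent(const int* d_in, int* d_out)
{
    using BlockScanT = cub::BlockScan<int, BLOCK_THREADS>;
    __shared__ typename BlockScanT::TempStorage temp_storage;

    // Blocked arrangement: each thread owns a contiguous run of ITEMS_PER_THREAD items,
    // matching the layout the original BlockLoadVectorized call produced.
    int data[ITEMS_PER_THREAD];
    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        data[i] = d_in[threadIdx.x * ITEMS_PER_THREAD + i];

    int aggregate;
    BlockScanT(temp_storage).ExclusiveSum(data, data, aggregate);

    for (int i = 0; i < ITEMS_PER_THREAD; ++i)
        d_out[threadIdx.x * ITEMS_PER_THREAD + i] = data[i];

    // Mirror the original output layout: the tile-wide aggregate is appended after the tile.
    if (threadIdx.x == 0)
        d_out[BLOCK_THREADS * ITEMS_PER_THREAD] = aggregate;
}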
ab48a28898b8fd6462ec962e9540a08787505ac7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // TODO: reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { struct LpPoolFunctor { explicit LpPoolFunctor(const OperatorBase& /* op */) {} }; } // namespace namespace { using c10::hip::compat::abs; using c10::hip::compat::pow; template <typename T> __global__ void LpPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += pow(abs(bottom_data[bottom_offset + h * width + w]), p); } } top_data[index] = pow(top_data[index], static_cast<T>(1.0) / p); } } template <typename T> __global__ void LpPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += pow( abs(bottom_data[bottom_offset + (h * width + w) * channels]), p); } } top_data[index] = pow(output, static_cast<T>(1.0) / p); } } template <typename T> __global__ void LpPoolBackwardNCHW( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const int p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; const T* const top_data_slice = top_data + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[ph * pooled_width + pw] * bottom_data[index] * pow(abs(bottom_data[index]), p - 2) / pow(top_data_slice[ph * pooled_width + pw], p - 1); } } bottom_diff[index] = gradient; } } template <typename T> __global__ void LpPoolBackwardNHWC( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; const T* const top_data_slice = top_data + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] * bottom_data[index] * pow(abs(bottom_data[index]), p - 2) / pow(top_data_slice[(ph * pooled_width + pw) * channels], p - 1); } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<float>()); int output_size = Y->size(); hipLaunchKernelGGL(( LpPoolForwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(3)); auto* Y = Output(0, sizes, at::dtype<float>()); int output_size = Y->size(); hipLaunchKernelGGL(( LpPoolForwardNHWC<float>) , 
dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0, X.sizes(), at::dtype<float>()); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)}); hipLaunchKernelGGL(( LpPoolBackwardNCHW<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0, X.sizes(), at::dtype<float>()); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)}); hipLaunchKernelGGL(( LpPoolBackwardNHWC<float>) , dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPoolFunctor>); REGISTER_CUDA_OPERATOR( LpPoolGradient, PoolGradientOp<float, CUDAContext, LpPoolFunctor>); }
ab48a28898b8fd6462ec962e9540a08787505ac7.cu
// TODO: reduce the apparent redundancy of all the code below. #include <cfloat> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/pool_op.h" namespace caffe2 { namespace { struct LpPoolFunctor { explicit LpPoolFunctor(const OperatorBase& /* op */) {} }; } // namespace namespace { using c10::cuda::compat::abs; using c10::cuda::compat::pow; template <typename T> __global__ void LpPoolForwardNCHW( const int nthreads, const T* bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index; int pw = n % pooled_width; n /= pooled_width; int ph = n % pooled_height; n /= pooled_height; int c = n % channels; n /= channels; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); top_data[index] = 0; int bottom_offset = (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { top_data[index] += pow(abs(bottom_data[bottom_offset + h * width + w]), p); } } top_data[index] = pow(top_data[index], static_cast<T>(1.0) / p); } } template <typename T> __global__ void LpPoolForwardNHWC( const int nthreads, const T* bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* top_data, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int c = index % channels; int pw = (index / channels) % pooled_width; int ph = (index / channels / pooled_width) % pooled_height; int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); T output = 0; int bottom_offset = n * height * width * channels + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { output += pow( abs(bottom_data[bottom_offset + (h * width + w) * channels]), p); } } top_data[index] = pow(output, static_cast<T>(1.0) / p); } } template <typename T> __global__ void LpPoolBackwardNCHW( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const int p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_l; const int h = (index / width) % height + pad_t; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; const T* const top_data_slice = top_data + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[ph * pooled_width + pw] * bottom_data[index] * pow(abs(bottom_data[index]), p - 2) / pow(top_data_slice[ph * pooled_width + pw], p - 1); } } bottom_diff[index] = gradient; } } template <typename T> __global__ void LpPoolBackwardNHWC( const int nthreads, const T* const top_diff, const T* const top_data, const T* const bottom_data, const int num, const int height, const int width, const int channels, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_t, const int pad_l, T* const bottom_diff, const T p) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int c = index % channels; const int w = index / channels % width + pad_l; const int h = (index / channels / width) % height + pad_t; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); T gradient = 0; const T* const top_diff_slice = top_diff + n * pooled_height * pooled_width * channels + c; const T* const top_data_slice = top_data + n * pooled_height * pooled_width * channels + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_t; int wstart = pw * stride_w - pad_l; int hend = min(hstart + kernel_h, height); int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); gradient += top_diff_slice[(ph * pooled_width + pw) * channels] * bottom_data[index] * pow(abs(bottom_data[index]), p - 2) / pow(top_data_slice[(ph * pooled_width + pw) * channels], p - 1); } } bottom_diff[index] = gradient; } } } // namespace template <> bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<float>()); int output_size = Y->size(); LpPoolForwardNCHW<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(3)); auto* Y = Output(0, sizes, at::dtype<float>()); int output_size = Y->size(); LpPoolForwardNHWC<float> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, 
context_.cuda_stream()>>>( output_size, X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(1), Y->dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>:: RunOnDeviceWithOrderNCHW() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0, X.sizes(), at::dtype<float>()); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)}); LpPoolBackwardNCHW<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } template <> bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>:: RunOnDeviceWithOrderNHWC() { auto& X = Input(0); auto& Y = Input(1); auto& dY = Input(2); CAFFE_ENFORCE_EQ(dY.ndim(), 4); auto* dX = Output(0, X.sizes(), at::dtype<float>()); ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)}); LpPoolBackwardNHWC<float> <<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), dY.data<float>(), Y.data<float>(), X.data<float>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(1), dY.dim32(2), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<float>(), OperatorBase::GetSingleArgument<float>("p", 2.0)); return true; } REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPoolFunctor>); REGISTER_CUDA_OPERATOR( LpPoolGradient, PoolGradientOp<float, CUDAContext, LpPoolFunctor>); }
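Each LpPool forward kernel above reduces a pooling window to (sum_i |x_i|^p)^(1/p). A scalar CPU reference for one NCHW output element, useful for sanity-checking the CUDA/HIP kernels, could look like the sketch below; the function name and raw-pointer layout are assumptions for illustration, not part of the operator.

#include <algorithm>
#include <cmath>

// Reference Lp pooling of a single (ph, pw) output element over one H x W channel slice.
float lp_pool_reference(const float* slice, int height, int width,
                        int ph, int pw, int kernel_h, int kernel_w,
                        int stride_h, int stride_w, int pad_t, int pad_l, float p)
{
  // Same window clamping as the kernels: hend/wend use the unclamped start.
  const int hstart = std::max(ph * stride_h - pad_t, 0);
  const int wstart = std::max(pw * stride_w - pad_l, 0);
  const int hend   = std::min(ph * stride_h - pad_t + kernel_h, height);
  const int wend   = std::min(pw * stride_w - pad_l + kernel_w, width);
  float acc = 0.0f;
  for (int h = hstart; h < hend; ++h)
    for (int w = wstart; w < wend; ++w)
      acc += std::pow(std::fabs(slice[h * width + w]), p);
  return std::pow(acc, 1.0f / p);
}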
55af1aea55f6b203eadc5930bc921dbefeec9c3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "Pass.h" #include "constants.h" #include "lattice_PAR.h" using namespace std; void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta) { REAL avg_x=0,avg_xp=0,avg_y=0,avg_yp=0,avg_delta=0,sig_xx=0,sig_xpxp=0,sig_xxp=0,sig_yy=0,sig_ypyp=0,sig_yyp=0,sig_delta=0; for(int i=0;i<_Npart;i++) { avg_x+=y[i].x[x_]/_Npart; avg_xp+=y[i].x[px_]/(1+y[i].x[delta_])/_Npart; avg_y+=y[i].x[y_]/_Npart; avg_yp+=y[i].x[py_]/(1+y[i].x[delta_])/_Npart; avg_delta+=y[i].x[delta_]/_Npart; } for(int i=0;i<_Npart;i++) { sig_xx+=(y[i].x[x_]-avg_x)*(y[i].x[x_]-avg_x)/_Npart; sig_xpxp+=(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_xxp+=(y[i].x[x_]-avg_x)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_yy+=(y[i].x[y_]-avg_y)*(y[i].x[y_]-avg_y)/_Npart; sig_ypyp+=(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_yyp+=(y[i].x[y_]-avg_y)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_delta+=(y[i].x[delta_]-avg_delta)*(y[i].x[delta_]-avg_delta)/_Npart; } *PEx=sqrt(sig_xx*sig_xpxp-sig_xxp*sig_xxp); *PEy=sqrt(sig_yy*sig_ypyp-sig_yyp*sig_yyp); *PEdelta=sqrt(sig_delta); } int main(int argc, char** argv) { //initialization const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); COORD *part, *dpart; int size = _Npart * sizeof(COORD); part=(COORD*)malloc(size); hipMalloc(&dpart,size); REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta; int i,n; for(i=0;i<_Npart;i++) { do {Jx=gsl_ran_exponential(r, 2*E_x);} while(Jx>E_x*6); do {Jy=gsl_ran_exponential(r, 2*E_y);} while(Jy>E_y*6); phi_x=gsl_ran_flat(r,0,2*M_PI); phi_y=gsl_ran_flat(r,0,2*M_PI); part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x); part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x); part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y); part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y); part[i].x[z_]=0; part[i].x[delta_]=0.00; } // part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.00; hipMemcpy(dpart,part,size,hipMemcpyHostToDevice); ofstream outfile("abc.txt"); outfile.close(); for(n=0;n<_Nturn1;n++) { hipLaunchKernelGGL(( Track), dim3(_BlockNum),dim3(_ThreadNum), 0, 0, dpart,n); hipMemcpy(part,dpart,size,hipMemcpyDeviceToHost); emittance(part,&Ex,&Ey,&Sdelta); ofstream outfile("abc.txt",ios::app); outfile<<n<<" "<<Ex<<" "<<Ey<<" "<<Sdelta<<endl; // outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; // for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} } outfile.close(); } cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; free(part); hipFree(dpart); gsl_rng_free (r); }
55af1aea55f6b203eadc5930bc921dbefeec9c3b.cu
#include <iostream> #include <fstream> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <curand.h> #include <curand_kernel.h> #include "Pass.h" #include "constants.h" #include "lattice_PAR.h" using namespace std; void emittance(COORD *y, REAL *PEx, REAL *PEy, REAL *PEdelta) { REAL avg_x=0,avg_xp=0,avg_y=0,avg_yp=0,avg_delta=0,sig_xx=0,sig_xpxp=0,sig_xxp=0,sig_yy=0,sig_ypyp=0,sig_yyp=0,sig_delta=0; for(int i=0;i<_Npart;i++) { avg_x+=y[i].x[x_]/_Npart; avg_xp+=y[i].x[px_]/(1+y[i].x[delta_])/_Npart; avg_y+=y[i].x[y_]/_Npart; avg_yp+=y[i].x[py_]/(1+y[i].x[delta_])/_Npart; avg_delta+=y[i].x[delta_]/_Npart; } for(int i=0;i<_Npart;i++) { sig_xx+=(y[i].x[x_]-avg_x)*(y[i].x[x_]-avg_x)/_Npart; sig_xpxp+=(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_xxp+=(y[i].x[x_]-avg_x)*(y[i].x[px_]/(1+y[i].x[delta_])-avg_xp)/_Npart; sig_yy+=(y[i].x[y_]-avg_y)*(y[i].x[y_]-avg_y)/_Npart; sig_ypyp+=(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_yyp+=(y[i].x[y_]-avg_y)*(y[i].x[py_]/(1+y[i].x[delta_])-avg_yp)/_Npart; sig_delta+=(y[i].x[delta_]-avg_delta)*(y[i].x[delta_]-avg_delta)/_Npart; } *PEx=sqrt(sig_xx*sig_xpxp-sig_xxp*sig_xxp); *PEy=sqrt(sig_yy*sig_ypyp-sig_yyp*sig_yyp); *PEdelta=sqrt(sig_delta); } int main(int argc, char** argv) { //initialization const gsl_rng_type * T; gsl_rng * r; gsl_rng_env_setup(); T = gsl_rng_default; r = gsl_rng_alloc (T); COORD *part, *dpart; int size = _Npart * sizeof(COORD); part=(COORD*)malloc(size); cudaMalloc(&dpart,size); REAL phi_x,phi_y,Jx,Jy,Ex,Ey,Sdelta; int i,n; for(i=0;i<_Npart;i++) { do {Jx=gsl_ran_exponential(r, 2*E_x);} while(Jx>E_x*6); do {Jy=gsl_ran_exponential(r, 2*E_y);} while(Jy>E_y*6); phi_x=gsl_ran_flat(r,0,2*M_PI); phi_y=gsl_ran_flat(r,0,2*M_PI); part[i].x[x_]=sqrt(Jx*Beta_x)*cos(phi_x); part[i].x[px_]=sqrt(Jx/Beta_x)*sin(phi_x); part[i].x[y_]=sqrt(Jy*Beta_y)*cos(phi_y); part[i].x[py_]=sqrt(Jy/Beta_y)*sin(phi_y); part[i].x[z_]=0; part[i].x[delta_]=0.00; } // part[0].x[0]=0.000;part[0].x[1]=0.000;part[0].x[2]=0.000;part[0].x[3]=0.000;part[0].x[5]=0.00; cudaMemcpy(dpart,part,size,cudaMemcpyHostToDevice); ofstream outfile("abc.txt"); outfile.close(); for(n=0;n<_Nturn1;n++) { Track<<<_BlockNum,_ThreadNum>>>(dpart,n); cudaMemcpy(part,dpart,size,cudaMemcpyDeviceToHost); emittance(part,&Ex,&Ey,&Sdelta); ofstream outfile("abc.txt",ios::app); outfile<<n<<" "<<Ex<<" "<<Ey<<" "<<Sdelta<<endl; // outfile<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; // for(int k=0;k<_Npart;k++) { if(abs(part[k].x[0])>10||abs(part[k].x[1])>10||part[k].x[2]>10||part[k].x[3]>10||part[k].x[5]>2) {cout<<n<<" "<<part[k].x[0]<<" "<<part[k].x[1]<<" "<<part[k].x[2]<<" "<<part[k].x[3]<<" "<<part[k].x[4]<<" "<<part[k].x[5]<<endl;} } outfile.close(); } cout<<part[0].x[0]<<" "<<part[0].x[1]<<" "<<part[0].x[2]<<" "<<part[0].x[3]<<" "<<part[0].x[4]<<" "<<part[0].x[5]<<endl; free(part); cudaFree(dpart); gsl_rng_free (r); }
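The paired .hip and .cu sources above differ only in the runtime API names rewritten by hipify (cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, the curand headers become hiprand) and in the kernel-launch syntax, which maps as follows for a launch with no dynamic shared memory on the default stream:

// For any kernel K and launch configuration:
//   CUDA (.cu above):  K<<<grid, block>>>(args...);
//   HIP  (.hip above): hipLaunchKernelGGL((K), dim3(grid), dim3(block), 0 /*sharedMem*/, 0 /*stream*/, args...);
// e.g. the tracking launch above, Track<<<_BlockNum, _ThreadNum>>>(dpart, n),
// becomes hipLaunchKernelGGL((Track), dim3(_BlockNum), dim3(_ThreadNum), 0, 0, dpart, n);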
ec7a78d81312b1ff15a6fc3e03c630aae10cbb88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <execution/cuda/LaunchDims.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMinLinearKernel(const void* input, const sd::LongType* inputShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* output, const sd::LongType* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ const T* x; __shared__ T* z; __shared__ sd::LongType threadsPerSegment, start, finish; auto segment = blockIdx.x; if(blockIdx.x >= numOfClasses) return; if (threadIdx.x == 0) { x = reinterpret_cast<const T*>(input); z = reinterpret_cast<T*>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T*>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); if(zIndex >= zLen) return; start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); if(xIndex >= xLen) return; sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentMinLinearKernel(const void* input, const sd::LongType* inputShape, const void* indices, const sd::LongType* indicesShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* output, const sd::LongType* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, segment, zIndex; __shared__ const T* x; __shared__ T* z; __shared__ const I* y; // int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<const T*>(input); z = reinterpret_cast<T*>(output); y = 
reinterpret_cast<const I*>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentMin kernel template <typename T, typename I> static SD_KERNEL void segmentMinTadKernel(const void* inputBuf, const sd::LongType* inputShape, const sd::LongType* inputTads, const sd::LongType* inputTadOffsets, I* indices, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* outputBuf, const sd::LongType* outputShape, const sd::LongType* outputTads, const sd::LongType* outputTadOffsets, sd::LongType indicesLen) { __shared__ T* val; __shared__ sd::LongType len, zIndex, total; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; if(blockIdx.x >= indicesLen) return; auto segment = indices[blockIdx.x]; // / threadsPerSegment; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<const T*>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // // segmen min template <typename T, typename I> static void segmentMinFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; auto classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); auto classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); if (input->isVector()) { dim3 launchDims = segmentDims(numClasses,input->lengthOf()); hipLaunchKernelGGL(( segmentMinLinearKernel<T, I>), dim3(launchDims.y),dim3(launchDims.x), launchDims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, 
output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dim3 launchDims = segmentTad(input->sizeAt(0)); hipLaunchKernelGGL(( segmentMinTadKernel<T, I>), dim3(launchDims.y), dim3(launchDims.x), launchDims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMinFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMinFunctor_, (context, input, indices, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMinFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims = getFillUpSegmentsDims(numOfClasses, indices->lengthOf()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); NDArray::prepareSpecialUse({output}, {input, indices}); if (input->isVector()) { hipLaunchKernelGGL(( unsortedSegmentMinLinearKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(DataTypeUtils::max<T>()); sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dims.x = 
input->sizeAt(0); hipLaunchKernelGGL(( segmentMinTadKernel<T, I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMinFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMinFunctor_, (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } template <typename T, typename I> static SD_KERNEL void segmentMinBPLinearKernel(const void* inputBuf, const sd::LongType* inputShape, void* forwardOutput, const sd::LongType* forwardShape, void* eps, const sd::LongType* epsShape, const void* indicesBuf, const sd::LongType* indicesShape, void* outputBuf, const sd::LongType* outputShape) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::sd_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMinBPTadKernel(const void* inputBuf, const sd::LongType* inputShape, void* forwardOutput, const sd::LongType* forwardShape, void* eps, const sd::LongType* epsShape, const void* indicesBuf, const sd::LongType* indicesShape, void* outputBuf, const sd::LongType* outputShape, const sd::LongType* inputTad, const sd::LongType* inputOffsets, const sd::LongType* gradInTad, const sd::LongType* gradInOffsets, const sd::LongType* gradOutTad, const sd::LongType* gradOutOffsets, const sd::LongType* outTad, const sd::LongType* outOffsets) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = 
reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; auto current = x + inputOffsets[i]; auto currentOut = z + outOffsets[i]; auto in = gradIn + gradInOffsets[segment]; auto outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::sd_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> sd::Status segmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); segmentMinFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); hipLaunchKernelGGL(( segmentMinBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradInTads = packGradIn->specialShapeInfo(); auto gradInTadOffsets = packGradIn->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); hipLaunchKernelGGL(( segmentMinBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // // segmen min sd::Status segmentMinFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { 
NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMinFunctorBP_, (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } template <typename T, typename I> static sd::Status unsortedSegmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); unsortedSegmentMinFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); hipLaunchKernelGGL(( segmentMinBPLinearKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradInTads = packGradIn->specialShapeInfo(); auto gradInTadOffsets = packGradIn->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); hipLaunchKernelGGL(( segmentMinBPTadKernel<T, I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentMinFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMinFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace 
helpers } // namespace ops } // namespace sd
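The kernels above rely on sd::math::atomics::sd_atomicMin to combine per-thread candidates. Hardware atomicMin only covers integer types, so a floating-point atomic minimum is usually built from atomicCAS; the sketch below shows that standard pattern (it is not the library's implementation, and it ignores NaN and signed-zero corner cases).

#include <cuda_runtime.h>

// Generic CAS-based atomic minimum for float (illustrative; not sd_atomicMin itself).
__device__ float atomicMinFloat(float* address, float val) {
  int* address_as_int = reinterpret_cast<int*>(address);
  int old = *address_as_int;
  int assumed;
  do {
    assumed = old;
    if (__int_as_float(assumed) <= val) break;  // stored value already <= candidate: nothing to do
    // Try to swap in the candidate; on failure, `old` holds the value another thread just wrote.
    old = atomicCAS(address_as_int, assumed, __float_as_int(val));
  } while (assumed != old);
  return __int_as_float(old);
}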
ec7a78d81312b1ff15a6fc3e03c630aae10cbb88.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]> // #include <array/NDArrayFactory.h> #include <exceptions/cuda_exception.h> #include <helpers/ConstantTadHelper.h> #include <helpers/PointersManager.h> #include <helpers/ShapeUtils.h> #include <helpers/TAD.h> #include <ops/declarable/helpers/segment.h> #include <ops/declarable/helpers/segment_common.h> #include <execution/cuda/LaunchDims.h> namespace sd { namespace ops { namespace helpers { // -------------------------------------------------------------------------------------------------------------- // // Segment ops linear kernels // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMinLinearKernel(const void* input, const sd::LongType* inputShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* output, const sd::LongType* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, zIndex; __shared__ const T* x; __shared__ T* z; __shared__ sd::LongType threadsPerSegment, start, finish; auto segment = blockIdx.x; if(blockIdx.x >= numOfClasses) return; if (threadIdx.x == 0) { x = reinterpret_cast<const T*>(input); z = reinterpret_cast<T*>(output); extern __shared__ unsigned char shmem[]; val = reinterpret_cast<T*>(shmem); xLen = shape::length(inputShape); zLen = shape::length(outputShape); if (segment < numOfClasses) { zIndex = shape::getIndexOffset(segment, outputShape); if(zIndex >= zLen) return; start = starts[segment]; finish = start + lengths[segment]; z[zIndex] = x[shape::getIndexOffset(start, inputShape)]; val[segment] = z[zIndex]; } } __syncthreads(); for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); if(xIndex >= xLen) return; sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void unsortedSegmentMinLinearKernel(const void* input, const sd::LongType* inputShape, const void* indices, const sd::LongType* indicesShape, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* output, const sd::LongType* outputShape) { __shared__ T* val; __shared__ sd::LongType xLen, zLen, segment, zIndex; __shared__ const T* x; __shared__ T* z; __shared__ const I* y; // int threadsPerSegment, start, finish; if (threadIdx.x == 0) { segment = blockIdx.x; x = reinterpret_cast<const T*>(input); z = reinterpret_cast<T*>(output); y = reinterpret_cast<const I*>(indices); xLen = shape::length(inputShape); zLen = shape::length(outputShape); 
zIndex = shape::getIndexOffset(segment, outputShape); if (lengths[segment] > 0) z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)]; else z[zIndex] = DataTypeUtils::max<T>(); } __syncthreads(); if (lengths[segment] > 0) for (auto e = threadIdx.x + 1; e < xLen; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputShape); auto yIndex = shape::getIndexOffset(e, indicesShape); if (y[yIndex] == segment) { sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } } // -------------------------------------------------------------------------------------------------------------- // // SegmentMin kernel template <typename T, typename I> static SD_KERNEL void segmentMinTadKernel(const void* inputBuf, const sd::LongType* inputShape, const sd::LongType* inputTads, const sd::LongType* inputTadOffsets, I* indices, sd::LongType* starts, sd::LongType* lengths, sd::LongType numOfClasses, void* outputBuf, const sd::LongType* outputShape, const sd::LongType* outputTads, const sd::LongType* outputTadOffsets, sd::LongType indicesLen) { __shared__ T* val; __shared__ sd::LongType len, zIndex, total; __shared__ T* z; __shared__ int threadsPerSegment, start, finish; if(blockIdx.x >= indicesLen) return; auto segment = indices[blockIdx.x]; // / threadsPerSegment; if (threadIdx.x == 0) { z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment]; len = shape::length(inputTads); start = starts[segment]; finish = start + lengths[segment]; total = shape::sizeAt(inputShape, 0); } __syncthreads(); auto idx = blockIdx.x; if (blockIdx.x <= total) { auto x = reinterpret_cast<const T*>(inputBuf) + inputTadOffsets[idx]; if (blockIdx.x == start) { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } else { for (auto e = threadIdx.x; e < len; e += blockDim.x) { auto xIndex = shape::getIndexOffset(e, inputTads); auto zIndex = shape::getIndexOffset(e, outputTads); sd::math::atomics::sd_atomicMin(&z[zIndex], x[xIndex]); } } } } // -------------------------------------------------------------------------------------------------------------- // // segmen min template <typename T, typename I> static void segmentMinFunctor_(LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { auto stream = context->getCudaStream(); sd::LongType numClasses = indices->e<sd::LongType>(indices->lengthOf() - 1) + 1; auto classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); auto classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens); NDArray::prepareSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); if (input->isVector()) { dim3 launchDims = segmentDims(numClasses,input->lengthOf()); segmentMinLinearKernel<T, I><<<launchDims.y,launchDims.x, launchDims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = 
ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dim3 launchDims = segmentTad(input->sizeAt(0)); segmentMinTadKernel<T, I><<<launchDims.y, launchDims.x, launchDims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, &classesRangesBegs, &classesRangesLens}); } // -------------------------------------------------------------------------------------------------------------- // void segmentMinFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentMinFunctor_, (context, input, indices, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static void unsortedSegmentMinFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { auto stream = context->getCudaStream(); NDArray classesRangesBegs = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); NDArray classesRangesLens = NDArrayFactory::create<sd::LongType>('c', {numOfClasses}, context); output->assign(DataTypeUtils::infOrMax<T>()); classesRangesBegs.assign(indices->lengthOf()); classesRangesLens.assign(0); dim3 dims = getFillUpSegmentsDims(numOfClasses, indices->lengthOf()); fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens); sd::LongType* begins = reinterpret_cast<sd::LongType*>(classesRangesBegs.specialBuffer()); sd::LongType* lengths = reinterpret_cast<sd::LongType*>(classesRangesLens.specialBuffer()); NDArray::prepareSpecialUse({output}, {input, indices}); if (input->isVector()) { unsortedSegmentMinLinearKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo()); } else { output->assign(DataTypeUtils::max<T>()); sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); dims.x = input->sizeAt(0); segmentMinTadKernel<T, I><<<dims.x, dims.y, dims.z, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, 
reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets, indices->lengthOf()); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices}); } // -------------------------------------------------------------------------------------------------------------- // void unsortedSegmentMinFunctor(sd::LaunchContext* context, NDArray* input, NDArray* indices, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices}); output->nullify(); BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentMinFunctor_, (context, input, indices, numOfClasses, output), SD_NUMERIC_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices}); } template <typename T, typename I> static SD_KERNEL void segmentMinBPLinearKernel(const void* inputBuf, const sd::LongType* inputShape, void* forwardOutput, const sd::LongType* forwardShape, void* eps, const sd::LongType* epsShape, const void* indicesBuf, const sd::LongType* indicesShape, void* outputBuf, const sd::LongType* outputShape) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ sd::LongType xLen, gradLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); gradIn = reinterpret_cast<T*>(forwardOutput); gradOut = reinterpret_cast<T*>(eps); gradLen = shape::length(epsShape); } __syncthreads(); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = gridDim.x * blockDim.x; for (auto e = start; e < xLen; e += step) { auto zOffset = shape::getIndexOffset(e, outputShape); auto xOffset = shape::getIndexOffset(e, inputShape); auto yOffset = shape::getIndexOffset(e, indicesShape); auto classIndex = y[yOffset]; auto gradOffsetI = shape::getIndexOffset(classIndex, forwardShape); auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape); if (sd::math::sd_abs(gradIn[gradOffsetI] - x[xOffset]) <= T(1.e-6)) { z[zOffset] = gradOut[gradOffsetO]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> static SD_KERNEL void segmentMinBPTadKernel(const void* inputBuf, const sd::LongType* inputShape, void* forwardOutput, const sd::LongType* forwardShape, void* eps, const sd::LongType* epsShape, const void* indicesBuf, const sd::LongType* indicesShape, void* outputBuf, const sd::LongType* outputShape, const sd::LongType* inputTad, const sd::LongType* inputOffsets, const sd::LongType* gradInTad, const sd::LongType* gradInOffsets, const sd::LongType* gradOutTad, const sd::LongType* gradOutOffsets, const sd::LongType* outTad, const sd::LongType* outOffsets) { __shared__ const T* x; __shared__ T* gradIn; __shared__ T* gradOut; __shared__ const I* y; __shared__ T* z; __shared__ sd::LongType xLen, yLen, gradLen, currentLen; if (threadIdx.x == 0) { xLen = shape::length(inputShape); x = reinterpret_cast<const T*>(inputBuf); y = reinterpret_cast<const I*>(indicesBuf); z = reinterpret_cast<T*>(outputBuf); yLen = shape::length(indicesShape); gradOut = reinterpret_cast<T*>(eps); gradIn = reinterpret_cast<T*>(forwardOutput); gradLen = shape::length(epsShape); currentLen = shape::length(outTad); } __syncthreads(); for (auto i = blockIdx.x; i < yLen; i += gridDim.x) { auto yIndex = 
shape::getIndexOffset(i, indicesShape); auto segment = y[yIndex]; auto current = x + inputOffsets[i]; auto currentOut = z + outOffsets[i]; auto in = gradIn + gradInOffsets[segment]; auto outGrad = gradOut + gradOutOffsets[segment]; for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) { if (sd::math::sd_abs(in[e] - current[e]) <= T(1.e-6)) currentOut[e] = outGrad[e]; } } } // -------------------------------------------------------------------------------------------------------------- // template <typename T, typename I> sd::Status segmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); //->shapeInfo(), context); segmentMinFunctor_<T, I>(context, input, indices, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); segmentMinBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(),1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradInTads = packGradIn->specialShapeInfo(); auto gradInTadOffsets = packGradIn->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); segmentMinBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // // segmen min sd::Status segmentMinFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentMinFunctorBP_, (context, input, indices, gradOut, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, 
indices, gradOut}); } template <typename T, typename I> static sd::Status unsortedSegmentMinFunctorBP_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { // if input is a vector: (as if in doc sample) auto stream = context->getCudaStream(); NDArray tempRes(gradOut->ordering(), gradOut->getShapeAsVector(), DataTypeUtils::fromT<T>(), context); unsortedSegmentMinFunctor_<T, I>(context, input, indices, numOfClasses, &tempRes); NDArray::prepareSpecialUse({output}, {input, indices, gradOut, &tempRes}); if (input->isVector()) { sd::LongType loop_size = input->lengthOf(); auto numOfClasses = gradOut->lengthOf(); segmentMinBPLinearKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()); } else { sd::LongType zero = 0; std::vector<sd::LongType> *dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), 1,&zero); auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions); auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions); auto packGradIn = sd::ConstantTadHelper::getInstance().tadForDimensions(tempRes.shapeInfo(), dimensions); auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions); auto inputTads = packX->specialShapeInfo(); auto inputTadOffsets = packX->specialOffsets(); auto outputTads = packZ->specialShapeInfo(); auto outputTadOffsets = packZ->specialOffsets(); auto gradInTads = packGradIn->specialShapeInfo(); auto gradInTadOffsets = packGradIn->specialOffsets(); auto gradOutTads = packGradOut->specialShapeInfo(); auto gradOutTadOffsets = packGradOut->specialOffsets(); segmentMinBPTadKernel<T, I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>( input->specialBuffer(), input->specialShapeInfo(), tempRes.specialBuffer(), tempRes.specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradInTads, gradInTadOffsets, gradOutTads, gradOutTadOffsets, outputTads, outputTadOffsets); delete dimensions; } NDArray::registerSpecialUse({output}, {input, indices, gradOut, &tempRes}); return sd::Status::OK; } // -------------------------------------------------------------------------------------------------------------- // sd::Status unsortedSegmentMinFunctorBP(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* gradOut, sd::LongType numOfClasses, NDArray* output) { NDArray::prepareSpecialUse({output}, {input, indices, gradOut}); BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentMinFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), SD_FLOAT_TYPES, SD_INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, indices, gradOut}); } } // namespace helpers } // namespace ops } // namespace sd
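As a plain host-side cross-check of what the forward and backward kernels in the two files above compute, the hypothetical reference below (not part of the library) takes one segment id per input element: the forward pass is a per-segment minimum, and the backward pass copies a segment's upstream gradient only to the input positions equal to that minimum, using the same 1e-6 tolerance as the BP kernels.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
#include <vector>

// Hypothetical reference for segment-min forward and backward semantics.
void segmentMinRef(const std::vector<float>& x, const std::vector<int>& seg, int numSegments,
                   const std::vector<float>& gradOut,
                   std::vector<float>* fwdMin, std::vector<float>* dX) {
  // Forward: per-segment minimum, initialized to +max as in the functors above.
  fwdMin->assign(numSegments, std::numeric_limits<float>::max());
  for (std::size_t i = 0; i < x.size(); ++i)
    (*fwdMin)[seg[i]] = std::min((*fwdMin)[seg[i]], x[i]);

  // Backward: route gradOut[segment] to positions matching the segment minimum.
  dX->assign(x.size(), 0.f);
  for (std::size_t i = 0; i < x.size(); ++i)
    if (std::fabs((*fwdMin)[seg[i]] - x[i]) <= 1e-6f) (*dX)[i] = gradOut[seg[i]];
}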
71635d37cf2fd617e0abb528a6adb83080dad36b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crf_norm_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __device__ Dtype dot_stride2( const int count, const Dtype* const data1, const int stride1, const Dtype* const data2, const int stride2){ // Perform the dot with stride int offset1 = 0; int offset2 = 0; Dtype accum = 0; for (int i = 0; i < count; i++){ offset1 = i * stride1; offset2 = i * stride2; accum += data1[offset1] * data2[offset2]; } return accum; } template <typename Dtype> __device__ Dtype calc_distsq( const Dtype* const data1, const Dtype* const data2, const int im_height, const int im_width){ const Dtype h1 = data1[0] / im_height; const Dtype w1 = data1[1] / im_width; const Dtype h2 = data2[0] / im_height; const Dtype w2 = data2[1] / im_width; const Dtype dh = h1 - h2; const Dtype dw = w1 - w2; return dh * dh + dw * dw; } template <typename Dtype> __global__ void Forward_scaleinvariant_gpu_kernel( const int nthreads, const Dtype* const data_label, Dtype* data_diff, Dtype* bad_pixel_data, const int num, const int channels, const int height, const int width, const Dtype max_label, const Dtype min_label){ CUDA_KERNEL_LOOP(index, nthreads){ const int n = index / height; const int h = index % height; const int data_offset = (n*channels*height+h)*width; const int bad_pixel_idx = index; const int interval = height * width; // Iter the width and channels for (int w = 0; w < width; w++){ // Iter the channels int err_counter = 0; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; Dtype dataval = data_label[idx]; if (dataval > max_label){ err_counter++; }else if(dataval < min_label){ err_counter++; } } // Only if all channels invalid, the pixel will be considered // as invalid if(err_counter == channels){ bad_pixel_data[bad_pixel_idx] += channels; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; data_diff[idx] = 0; } } } } } template <typename Dtype> __global__ void Forward_gpu_kernel( const int nthreads, const Dtype* const data_label, Dtype* data_diff, Dtype* bad_pixel_data, const int num, const int channels, const int height, const int width, const bool has_max_label, const bool has_min_label, const Dtype max_label, const Dtype min_label, const Dtype C){ CUDA_KERNEL_LOOP(index, nthreads){ const int n = index / height; const int h = index % height; const int data_offset = (n*channels*height+h)*width; const int bad_pixel_idx = index; const int interval = height * width; // Iter the width and channels for (int w = 0; w < width; w++){ // Iter the channels int err_counter = 0; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; Dtype dataval = data_label[idx]; Dtype diffval = data_diff[idx]; if (has_max_label && dataval > max_label){ err_counter++; }else if(has_min_label && dataval < min_label){ err_counter++; //}else if(has_invalid_label && fabs(dataval - invalid_label) < 0.0001){ // err_counter++; } // alter the diff value if (diffval > 0 && diffval < C){ // L1 data_diff[idx] = C; }else if(diffval < 0 && -diffval < C){ data_diff[idx] = -C; } /* if (has_h_rate && diffval > H){ data_diff[idx] = H; }else if(has_h_rate && -diffval > H){ data_diff[idx] = -H; } */ } // Only if all channels invalid, the pixel will be considered // as invalid if(err_counter == channels){ bad_pixel_data[bad_pixel_idx] += channels; for (int c = 0; c < channels; c++){ const int idx = 
data_offset + c * interval; data_diff[idx] = 0; } } } } } template <typename Dtype> __global__ void calc_rd_gpu_kernel( const int num_kernels, const Dtype* const dep_data, const Dtype* const norm_data, const Dtype* const centroid_data, const Dtype* const g_data, Dtype* r_data, Dtype* d_data, const int num, const int channels, const int height, const int width, const int im_height, const int im_width, const Dtype w1, const Dtype w2, const Dtype w3, const Dtype theta, const Dtype focal, const bool use_gradient){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / width; const int h = (index / width) % height; const int w = index % width; if (w >= h){ return; } const int sp_plan = height; const int d_stride = height * width; const int feature1_idx = (n*channels+0)*sp_plan+h; const int feature2_idx = (n*channels+0)*sp_plan+w; const int top_idx1 = ((n*height)+h)*width+w; const int top_idx2 = ((n*height)+w)*width+h; const int coord_idx1 = ((n + 0) * sp_plan + h) * 2; const int coord_idx2 = ((n + 0) * sp_plan + w) * 2; const int d_idx1 = (n * 2 * height + h) * width + w; const int d_idx2 = (n * 2 * height + w) * width + h; const int dep_idx1 = n*height+h; const int dep_idx2 = n*height+w; const Dtype* coord_data1 = centroid_data + coord_idx1; const Dtype* coord_data2 = centroid_data + coord_idx2; Dtype dot = 0; Dtype feature1_norm = 1; Dtype feature2_norm = 1; if (use_gradient){ // Convert the gradient to surface normal const Dtype data1_dx = norm_data[feature1_idx]; const Dtype data1_dy = norm_data[feature1_idx + sp_plan]; const Dtype data2_dx = norm_data[feature2_idx]; const Dtype data2_dy = norm_data[feature2_idx + sp_plan]; const Dtype data1_z = 1; const Dtype data1_x = - data1_z * data1_dx; const Dtype data1_y = - data1_z * data1_dy; const Dtype data2_z = 1; const Dtype data2_x = - data2_z * data2_dx; const Dtype data2_y = - data2_z * data2_dy; dot = data1_x * data2_x + data1_y * data2_y + data1_z + data2_z; feature1_norm = sqrt(data1_x * data1_x + data1_y * data1_y + data1_z + data1_z); feature2_norm = sqrt(data2_x * data2_x + data2_y * data2_y + data2_z + data2_z); }else{ // Calc the angle between two feature vectors // Calc the L2 normal of the given two vectors feature1_norm = sqrt(dot_stride2(channels, norm_data+feature1_idx, sp_plan, norm_data+feature1_idx, sp_plan)); feature2_norm = sqrt(dot_stride2(channels, norm_data+feature2_idx, sp_plan, norm_data+feature2_idx, sp_plan)); // Calc the dot between the two feature vector dot = dot_stride2(channels, norm_data+feature1_idx, sp_plan, norm_data+feature2_idx, sp_plan); } // Calc the angle between the vector Dtype cos_ang; if (feature1_norm == 0 || feature2_norm == 0){ cos_ang = 0; }else{ cos_ang = min(max(dot / feature1_norm / feature2_norm, Dtype(-1)), Dtype(1)); } // Apply the theta regulation if (cos_ang < theta){ cos_ang = 0; } // Calc the distance between two points Dtype distsq = calc_distsq<Dtype>(coord_data1, coord_data2, im_height, im_width); // The larger means the less regulation cos_ang = Dtype(1) - cos_ang; const Dtype height1 = coord_data1[0]; const Dtype width1 = coord_data1[1]; const Dtype height2 = coord_data2[0]; const Dtype width2 = coord_data2[1]; const Dtype dh = (height2 - height1) / focal; const Dtype dw = (width2 - width1) / focal; d_data[d_idx1] = dh; d_data[d_idx1+d_stride] = dw; d_data[d_idx2] = -dh; d_data[d_idx2+d_stride] = -dw; // Calc the project depth diff const Dtype g1y = g_data[(n * 2 + 0)* height + h]; const Dtype g1x = g_data[(n * 2 + 1)* height + h]; const Dtype g2y = g_data[(n * 2 + 
0)* height + w]; const Dtype g2x = g_data[(n * 2 + 1)* height + w]; const Dtype dep1 = dep_data[dep_idx1]; const Dtype dep2 = dep_data[dep_idx2]; const Dtype dep1_proj = dep2 - dep2 * (dh * g2y + dw * g2x); const Dtype dep2_proj = dep1 + dep1 * (dh * g1y + dw * g1x); // The distance between two superpixel (focal normalized) const Dtype dist = sqrt(dh * dh + dw * dw); const Dtype proj_diff = (fabs(dep1_proj - dep1) + fabs(dep2_proj - dep2)) / dist; // Set the R data r_data[top_idx1] = -cos_ang * w1 - distsq * w2 - proj_diff * w3; r_data[top_idx2] = r_data[top_idx1]; } } template<typename Dtype> __global__ void calc_a_kmn_gpu_kernel( const int num_kernels, Dtype* a_data, const Dtype* const r_data, const Dtype* const d_data, const Dtype* const g_data, const int num, const int height, const int width){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height; const int h = index % height; // The channels is 1 // The basic index const int ar_idx = (n * height + h) * width; const int d_idx = (n * 2 * height + h) * width; const int g_idx = n * 2 * height; const int d_stride = height * width; const int g_stride = height; // A = A - K // K_ij = 0.5 * R_ij * (D_ij * G_i + D_ji * G_j) // K_ij = 0.5 * R_ij * (D_ij * G_i - D_ij * G_j) // A = A + M + N // M_ii = sigma_j (R_ij * D_ij * G_i) // N_ii = 0.5 * sigma_j (R_ij * (D_ij * G_i)^2 ) // Iter the width (superpixel num) for (int w = 0; w < width; w++){ Dtype k = 0.5 * r_data[ar_idx+w] * (d_data[d_idx+w] * g_data[g_idx+h] + d_data[d_idx+w+d_stride] * g_data[g_idx+h+g_stride] - d_data[d_idx+w] * g_data[g_idx+w] - d_data[d_idx+w+d_stride] * g_data[g_idx+w+g_stride]); a_data[ar_idx+w] = a_data[ar_idx+w] - k; Dtype val = d_data[d_idx+w] * g_data[g_idx+h] + d_data[d_idx+w+d_stride] * g_data[g_idx+h+g_stride]; a_data[ar_idx+h] += r_data[ar_idx+w] * val + 0.5 * r_data[ar_idx+w] * val * val; } } } template <typename Dtype> __global__ void calc_g_gpu_kernel( const int num_kernels, const Dtype* const bottom_data, Dtype* g_data, const int num, const int height){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height; const int h = index % height; // The basic index of the normal map const int norm_idx = n * 3 * height + h; // The basic index of the g const int g_idx = n * 2 * height + h; const Dtype x = bottom_data[norm_idx]; const Dtype y = bottom_data[norm_idx + height]; const Dtype z = bottom_data[norm_idx + 2 * height]; Dtype dx = - x / z; Dtype dy = - y / z; if (fabs(z) < 0.1){ dx = 0; dy = 0; } g_data[g_idx] = dy; g_data[g_idx + height] = dx; } } template <typename Dtype> __global__ void calc_a_gpu_kernel(const int n, Dtype* a_data, const int num, const int channels, const int height, const int width, const Dtype* r_data){ CUDA_KERNEL_LOOP(index, n) { const int n_idx = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int idx = ((n_idx*channels+c)*height+h)*width; // Calc the sum of the row in r_data Dtype sum = 0; for (int i = 0; i < width; i++){ sum += r_data[idx+i]; } a_data[idx+h] = sum + 1; } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_A_gpu(void){ // A = I + D -R // Set A to 0 caffe_gpu_set(A_.count(), Dtype(0), A_.mutable_gpu_data()); const int num = A_.num(); const int channels = A_.channels(); const int height = A_.height(); const int width = A_.width(); const Dtype* r_data = R_.gpu_data(); Dtype* a_data = A_.mutable_gpu_data(); // A = I + D // kernel num: n*c*h int num_kernels = num * channels * height; hipLaunchKernelGGL(( calc_a_gpu_kernel<Dtype>), 
dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, a_data, num, channels, height, width, r_data); CUDA_POST_KERNEL_CHECK; // A = A - R caffe_gpu_axpy(A_.count(), Dtype(-1), r_data, a_data); // If disable the surface normal guidance, return directly if (disable_normal_guidance_) return; // A = A - K // K_ij = 0.5 * R_ij * (D_ij * G_i + D_ji * G_j) // K_ij = 0.5 * R_ij * (D_ij * G_i - D_ij * G_j) // A = A + M + N // M_ii = sigma_j (R_ij * D_ij * G_i) // N_ii = 0.5 * sigma_j (R_ij * (D_ij * G_i)^2 ) const Dtype* d_data = D_.gpu_data(); const Dtype* g_data = G_.gpu_data(); // The kernel number is n * height num_kernels = num * height; hipLaunchKernelGGL(( calc_a_kmn_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, a_data, r_data, d_data, g_data, num, height, width); CUDA_POST_KERNEL_CHECK; // If in scale invariant mode // A = A - Q if (unary_mode_ == ScaleInvariant){ const Dtype* q_data = Q_.gpu_data(); caffe_gpu_sub(A_.count(), a_data, q_data, a_data); } } /* Formulate the R matrix, which can be calculated from * bottom[2] normal prediction * bottom[3] centroid coordination * bottom[4] superpixel appearance [TODO] * * The R is combination of three parts: * 1. The cosine distance of surface normal, which is 1 - cos(angle) * 2. The normalized distance between two superpixel * 3. The cosine distance between appearance vector [optional] * * The overall R is: * exp(-w1*M1 - w2*M2 - w3*M3) * * Formulate the D matrix * Which is [n, 2, superpixel_num_, superpixel_num_] */ template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_RD_gpu(const vector<Blob<Dtype>*>& bottom){ const Blob<Dtype>* norm_bottom = bottom[2]; const Blob<Dtype>* centroid_bottom = bottom[3]; const Blob<Dtype>* dep_bottom = bottom[0]; const Dtype* norm_data = norm_bottom->gpu_data(); const Dtype* centroid_data = centroid_bottom->gpu_data(); const Dtype* g_data = G_.gpu_data(); const Dtype* dep_data = dep_bottom->gpu_data(); Dtype* r_data = R_.mutable_gpu_data(); Dtype* d_data = D_.mutable_gpu_data(); const int num = R_.num(); const int channels = norm_bottom->channels(); const int height = R_.height(); const int width = R_.width(); CHECK_EQ(height, width); // Clear the R caffe_gpu_set(R_.count(), Dtype(0), r_data); // Clear D caffe_gpu_set(D_.count(), Dtype(0), d_data); // The kernel number is the num * height * width // TODO: Since the top is a symmtic matrix, so half of the calculation is not // necessary. 
There might be a better method to assign the threads to the pixels const int num_kernels = num * height * width; hipLaunchKernelGGL(( calc_rd_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, dep_data, norm_data, centroid_data, g_data, r_data, d_data, num, channels, height, width, height_, width_, w1_, w2_, w3_, theta_, f_, use_gradient_); CUDA_POST_KERNEL_CHECK; // exp caffe_gpu_exp(R_.count(), r_data, r_data); // alpha the weight caffe_gpu_scal(R_.count(), Dtype(alpha_), r_data); } /* * Calc the G matrix according to the normal map * The bottom shape is [n, 3, superpixel_num_, 1] * The shape of G is [n, 2, superpixel_num_, 1] */ template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_G_gpu(const Blob<Dtype>* bottom){ Dtype* g_data = G_.mutable_gpu_data(); const Dtype* bottom_data = bottom->gpu_data(); if (use_gradient_){ CHECK_EQ(bottom->count(), G_.count()); caffe_copy(G_.count(), bottom_data, g_data); return; } // Set the G to zeros caffe_gpu_set(G_.count(), Dtype(0), g_data); const int num = G_.num(); const int height = G_.height(); CHECK_EQ(height, superpixel_num_); // The kernel number is num * height const int num_kernels = num * height; hipLaunchKernelGGL(( calc_g_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, bottom_data, g_data, num, height); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Euclidean_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ const Dtype* gt_data = gt->gpu_data(); const Dtype* pred_data = Pred_.gpu_data(); const int count = Pred_.count(); caffe_gpu_sub( count, pred_data, gt_data, Pred_.mutable_gpu_diff()); Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / count / Dtype(2); top->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::ScaleInvariant_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ CHECK_EQ(gt->channels(), 1); int count = gt->count(); caffe_gpu_sub( count, Pred_.gpu_data(), gt->gpu_data(), Pred_.mutable_gpu_diff()); Dtype* data_diff = Pred_.mutable_gpu_diff(); Dtype* vecSum_data = vecSum_.mutable_cpu_data(); const Dtype* data_label = gt->gpu_data(); const int num = gt->num(); const int channels = gt->channels(); const int height = gt->height(); const int width = gt->width(); // Set the number of the kernel] const int num_kernels = num * height; // Set the bad_pixel_ buffer to 0 Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data(); caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data); // Find the bad pixel and alter the diff if (has_min_label_ || has_max_label_){ hipLaunchKernelGGL(( Forward_scaleinvariant_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_label, data_diff, bad_pixel_data, num, channels, height, width, max_label_, min_label_); CUDA_POST_KERNEL_CHECK; } // The pixel number per image Dtype pixel_num = gt->count(1); // Calc the each image's valid pixel number in minibatch /* for (int n = 0; n < diff_.num(); n++){ if(is_adjust_pixel_num_){ Dtype val; int offset = bad_pixel_.offset(n); caffe_gpu_asum(height, bad_pixel_data + offset, &val); vecValidPixelNum_data[n] = pixel_num - val; }else{ vecValidPixelNum_data[n] = pixel_num; } } */ Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / count / Dtype(2); // Calc the second term of the loss for (int n = 0; n < gt->num(); n++){ const Dtype* 
cdata_diff = Pred_.cpu_diff() + Pred_.offset(n); Dtype valid_num = pixel_num; Dtype vecSum = caffe_cpu_sum(pixel_num, cdata_diff); vecSum_data[n] = vecSum; loss -= vecSum_data[n] * vecSum_data[n] / valid_num / valid_num / gt->num() * delta_ / Dtype(2); } top->mutable_cpu_data()[0] = loss; // DLOG(INFO) << "valid pixel num:" << valid_pixel_num_ <<" Loss:" << loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Berhu_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ const int count = Pred_.count(); const Dtype* label_data = gt->gpu_data(); const Dtype* pred_data = Pred_.gpu_data(); caffe_gpu_sub( count, pred_data, label_data, Pred_.mutable_gpu_diff()); Dtype max_diff = 0; switch(c_rate_mode_){ case MAX: // Get the abs max diff to determine the C max_diff = caffe_gpu_amax(count, Pred_.gpu_diff(), 1); // Calc the Threshold C break; case AVE: // Calc the mean of the abs diff caffe_gpu_asum(count, Pred_.gpu_diff(), &max_diff); max_diff = max_diff / count; break; default: LOG(FATAL) << "False c_rate_mode"; break; } Dtype C = fabs(max_diff * c_rate_); Dtype* data_diff = Pred_.mutable_gpu_diff(); // const Dtype* data_pred = bottom[0]->cpu_data(); const int num = Pred_.num(); const int channels = Pred_.channels(); const int height = Pred_.height(); const int width = Pred_.width(); // The number of kernel is num * height, process a row each time const int num_kernels = num * height; // Set the bad_pixel_ buffer to zero Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data(); caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data); // Find the bad pixel and alter the diff hipLaunchKernelGGL(( Forward_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, label_data, data_diff, bad_pixel_data, num, channels, height, width, has_max_label_, has_min_label_, max_label_, min_label_, C); CUDA_POST_KERNEL_CHECK; Dtype bad_pixel_count; caffe_gpu_asum(bad_pixel_.count(), bad_pixel_data, &bad_pixel_count); Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / Dtype(2) / (count-bad_pixel_count); top->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Inference_gpu(const Blob<Dtype>* Z){ const int dim = A_inv_.height(); const Dtype* a_data = A_inv_.gpu_data(); const Dtype* z_data = Z->gpu_data(); Dtype* pred_data = Pred_.mutable_gpu_data(); for (int n = 0; n < A_inv_.num(); n++){ const Dtype* a_data_n = a_data + A_inv_.offset(n); for (int c = 0; c < Z->channels(); c++){ const Dtype* z_data_nc = z_data + Z->offset(n, c); Dtype* pred_data_nc = pred_data + Pred_.offset(n, c); caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), a_data_n, z_data_nc, Dtype(0), pred_data_nc); } } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ // Generate the G // NOTE: The G must be calc before the R Calc_G_gpu(bottom[2]); // Generate the R and D Calc_RD_gpu(bottom); // Init the QP for scale invariant mode Init_QP_gpu(); // Calc the A matrix Calc_A_gpu(); // Calc the A_inv matrix // ---- DEBUG ----- // test the speed // time_t start, end; //runstart = clock(); Calc_A_inv_gpu(); //end = clock(); // LOG(INFO)<<"TIME: "<<(Dtype)(end-start)/CLOCKS_PER_SEC; // ---- DEBUG ----- // ----DEBUG ---- /* Blob<Dtype> tmp; int dim = A_.height(); tmp.Reshape(1,1,A_.height(), A_.width()); for (int n = 0; n < A_.num(); n++){ caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, dim, dim, dim, Dtype(1), A_.cpu_data()+A_.offset(n), 
A_inv_.cpu_data()+A_inv_.offset(n), Dtype(0), tmp.mutable_cpu_data()); LOG(INFO)<<"Det(S): "<<caffe_cpu_det(A_.height(), tmp.cpu_data()); } */ // Inference the crf switch (unary_mode_){ case ScaleInvariant: Inference_scaleinvariant_gpu(bottom[0]); break; default: Inference_gpu(bottom[0]); break; } // Copy the result if needed if (top.size() == 2){ caffe_copy(Pred_.count(), Pred_.gpu_data(), top[1]->mutable_gpu_data()); } // Calc the loss according to the Pred_ and the bottom[0] switch (unary_mode_){ case L2: Euclidean_loss_gpu(bottom[1], top[0]); break; case Berhu: Berhu_loss_gpu(bottom[1], top[0]); break; case ScaleInvariant: ScaleInvariant_loss_gpu(bottom[1], top[0]); break; default: LOG(FATAL)<<"Unknow unary_mode_ in CrfNormLossLayer"; break; } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // The BP will performed on the bottom[0] and bottom[2] const Dtype loss_weight = top[0]->cpu_diff()[0]; Dtype beta; if (normalize_){ beta = loss_weight / bottom[0]->count(); }else{ beta = loss_weight / bottom[0]->num(); } // BP for bottom[0] if (unary_mode_ == ScaleInvariant){ caffe_gpu_axpby( bottom[0]->count(), beta, Pred_.cpu_diff(), Dtype(0), buf_.mutable_gpu_data()); // In scale invariant mode, the BP should be P*A_inv_*P*Z - P*Y // diff = A_inv_*P*Z - Y, so the BP should be // P * diff const Dtype* p_data = P_.gpu_data(); const Dtype* buf_data = buf_.gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); for (int n = 0; n < bottom[0]->num(); n++){ const Dtype* p_data_n = p_data + P_.offset(n); for (int c = 0; c < bottom[0]->channels(); c++){ const Dtype* buf_data_nc = buf_data + buf_.offset(n, c); Dtype* bottom_diff_nc = bottom_diff + bottom[0]->offset(n, c); caffe_gpu_gemv(CblasNoTrans, P_.height(), P_.height(), Dtype(1), p_data_n, buf_data_nc, Dtype(0), bottom_diff_nc); } } }else{ // Other modes caffe_gpu_axpby( bottom[0]->count(), beta, Pred_.gpu_diff(), Dtype(0), bottom[0]->mutable_gpu_diff()); } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_A_inv_gpu(void){ const int num = A_.num(); const Dtype* a_data = A_.gpu_data(); Dtype* a_inv_data = A_inv_.mutable_gpu_data(); const int height = A_.height(); caffe_gpu_inv(height, num, a_data, a_inv_data); } template <typename Dtype> void CrfNormLossLayer<Dtype>::Init_QP_gpu(void){ if (unary_mode_ != ScaleInvariant) return; const Dtype val = delta_ / superpixel_num_; // Set the Q matrix Dtype* q_data = Q_.mutable_gpu_data(); caffe_gpu_set(Q_.count(), val, q_data); // Set the P matrix // P = I - Q Dtype* p_data = P_.mutable_gpu_data(); caffe_gpu_set(P_.count(), Dtype(0), p_data); caffe_gpu_sub(P_.count(), p_data, q_data, p_data); Dtype* p_data_cpu = P_.mutable_cpu_data(); for (int n = 0; n < P_.num(); n++){ for (int i = 0; i < P_.height(); i++){ p_data_cpu[P_.offset(n, 0, i, i)] += Dtype(1); } } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Inference_scaleinvariant_gpu(const Blob<Dtype>* Z){ // pred = A_inv_ * P * Z const int dim = A_inv_.height(); const Dtype* a_data = A_inv_.gpu_data(); const Dtype* z_data = Z->gpu_data(); const Dtype* p_data = P_.gpu_data(); // Creat a buffer to make sure the blas safe Dtype* buf_data = buf_.mutable_gpu_data(); Dtype* pred_data = Pred_.mutable_gpu_data(); for (int n = 0; n < A_inv_.num(); n++){ const Dtype* a_data_n = a_data + A_inv_.offset(n); const Dtype* p_data_n = p_data + P_.offset(n); for (int c = 0; c < Z->channels(); c++){ const Dtype* 
z_data_nc = z_data + Z->offset(n, c); Dtype* buf_data_nc = buf_data + buf_.offset(n, c); Dtype* pred_data_nc = pred_data + Pred_.offset(n, c); // buf = P * Z caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), p_data_n, z_data_nc, Dtype(0), buf_data_nc); // pred = A_inv_ * pred caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), a_data_n, buf_data_nc, Dtype(0), pred_data_nc); } } } template void CrfNormLossLayer<float>::Calc_A_gpu(void); template void CrfNormLossLayer<double>::Calc_A_gpu(void); template void CrfNormLossLayer<float>::Calc_RD_gpu(const vector<Blob<float>*>& bottom); template void CrfNormLossLayer<double>::Calc_RD_gpu(const vector<Blob<double>*>& bottom); template void CrfNormLossLayer<float>::Calc_G_gpu(const Blob<float>* bottom); template void CrfNormLossLayer<double>::Calc_G_gpu(const Blob<double>* bottom); template void CrfNormLossLayer<float>::Euclidean_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::Euclidean_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::Berhu_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::Berhu_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::ScaleInvariant_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::ScaleInvariant_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::Inference_gpu(const Blob<float>* Z); template void CrfNormLossLayer<double>::Inference_gpu(const Blob<double>* Z); template void CrfNormLossLayer<float>::Calc_A_inv_gpu(void); template void CrfNormLossLayer<double>::Calc_A_inv_gpu(void); template void CrfNormLossLayer<float>::Init_QP_gpu(void); template void CrfNormLossLayer<double>::Init_QP_gpu(void); template void CrfNormLossLayer<float>::Inference_scaleinvariant_gpu(const Blob<float>* Z); template void CrfNormLossLayer<double>::Inference_scaleinvariant_gpu(const Blob<double>* Z); INSTANTIATE_LAYER_GPU_FUNCS(CrfNormLossLayer); } // namespace caffe
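// --- Illustrative sketch (not part of the original layer) --------------------
// Calc_A_gpu above assembles A = I + diag(sum_j R_ij) - R (plus the optional
// K/M/N normal-guidance terms and the scale-invariant Q), Calc_A_inv_gpu
// inverts it with caffe_gpu_inv, and Inference_gpu applies pred = A^{-1} * Z
// with caffe_gpu_gemv.  The host-only sketch below reproduces the basic
// A-assembly and the solve for one tiny system, ignoring the guidance terms;
// it solves A * y = z directly instead of forming the inverse.  The helper
// names (buildA, solveDense) are hypothetical and exist only for illustration.
#include <vector>
#include <cstdio>

static std::vector<double> buildA(const std::vector<double>& R, int n) {
  // A = I + diag(rowsum(R)) - R, mirroring calc_a_gpu_kernel followed by
  // caffe_gpu_axpy(count, -1, r_data, a_data).
  std::vector<double> A(n * n, 0.0);
  for (int i = 0; i < n; ++i) {
    double rowsum = 0.0;
    for (int j = 0; j < n; ++j) rowsum += R[i * n + j];
    for (int j = 0; j < n; ++j) A[i * n + j] = -R[i * n + j];
    A[i * n + i] += 1.0 + rowsum;
  }
  return A;
}

static std::vector<double> solveDense(std::vector<double> A,
                                      std::vector<double> z, int n) {
  // Naive Gauss-Jordan elimination standing in for the batched inverse;
  // A is diagonally dominant by construction, so no pivoting is needed.
  for (int k = 0; k < n; ++k) {
    const double piv = A[k * n + k];
    for (int j = k; j < n; ++j) A[k * n + j] /= piv;
    z[k] /= piv;
    for (int i = 0; i < n; ++i) {
      if (i == k) continue;
      const double f = A[i * n + k];
      for (int j = k; j < n; ++j) A[i * n + j] -= f * A[k * n + j];
      z[i] -= f * z[k];
    }
  }
  return z;  // now holds A^{-1} * z
}

int main() {
  const int n = 3;
  // Symmetric pairwise affinities R (toy values; diagonal unused above).
  std::vector<double> R = {0.0, 0.5, 0.2,
                           0.5, 0.0, 0.1,
                           0.2, 0.1, 0.0};
  std::vector<double> z = {1.0, 2.0, 3.0};  // unary (network) depth prediction
  std::vector<double> y = solveDense(buildA(R, n), z, n);
  for (int i = 0; i < n; ++i) std::printf("pred[%d] = %f\n", i, y[i]);
  return 0;
}
// ------------------------------------------------------------------------------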
71635d37cf2fd617e0abb528a6adb83080dad36b.cu
#include <vector> #include "caffe/layers/crf_norm_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __device__ Dtype dot_stride2( const int count, const Dtype* const data1, const int stride1, const Dtype* const data2, const int stride2){ // Perform the dot with stride int offset1 = 0; int offset2 = 0; Dtype accum = 0; for (int i = 0; i < count; i++){ offset1 = i * stride1; offset2 = i * stride2; accum += data1[offset1] * data2[offset2]; } return accum; } template <typename Dtype> __device__ Dtype calc_distsq( const Dtype* const data1, const Dtype* const data2, const int im_height, const int im_width){ const Dtype h1 = data1[0] / im_height; const Dtype w1 = data1[1] / im_width; const Dtype h2 = data2[0] / im_height; const Dtype w2 = data2[1] / im_width; const Dtype dh = h1 - h2; const Dtype dw = w1 - w2; return dh * dh + dw * dw; } template <typename Dtype> __global__ void Forward_scaleinvariant_gpu_kernel( const int nthreads, const Dtype* const data_label, Dtype* data_diff, Dtype* bad_pixel_data, const int num, const int channels, const int height, const int width, const Dtype max_label, const Dtype min_label){ CUDA_KERNEL_LOOP(index, nthreads){ const int n = index / height; const int h = index % height; const int data_offset = (n*channels*height+h)*width; const int bad_pixel_idx = index; const int interval = height * width; // Iter the width and channels for (int w = 0; w < width; w++){ // Iter the channels int err_counter = 0; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; Dtype dataval = data_label[idx]; if (dataval > max_label){ err_counter++; }else if(dataval < min_label){ err_counter++; } } // Only if all channels invalid, the pixel will be considered // as invalid if(err_counter == channels){ bad_pixel_data[bad_pixel_idx] += channels; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; data_diff[idx] = 0; } } } } } template <typename Dtype> __global__ void Forward_gpu_kernel( const int nthreads, const Dtype* const data_label, Dtype* data_diff, Dtype* bad_pixel_data, const int num, const int channels, const int height, const int width, const bool has_max_label, const bool has_min_label, const Dtype max_label, const Dtype min_label, const Dtype C){ CUDA_KERNEL_LOOP(index, nthreads){ const int n = index / height; const int h = index % height; const int data_offset = (n*channels*height+h)*width; const int bad_pixel_idx = index; const int interval = height * width; // Iter the width and channels for (int w = 0; w < width; w++){ // Iter the channels int err_counter = 0; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval + w; Dtype dataval = data_label[idx]; Dtype diffval = data_diff[idx]; if (has_max_label && dataval > max_label){ err_counter++; }else if(has_min_label && dataval < min_label){ err_counter++; //}else if(has_invalid_label && fabs(dataval - invalid_label) < 0.0001){ // err_counter++; } // alter the diff value if (diffval > 0 && diffval < C){ // L1 data_diff[idx] = C; }else if(diffval < 0 && -diffval < C){ data_diff[idx] = -C; } /* if (has_h_rate && diffval > H){ data_diff[idx] = H; }else if(has_h_rate && -diffval > H){ data_diff[idx] = -H; } */ } // Only if all channels invalid, the pixel will be considered // as invalid if(err_counter == channels){ bad_pixel_data[bad_pixel_idx] += channels; for (int c = 0; c < channels; c++){ const int idx = data_offset + c * interval; data_diff[idx] = 0; } } } } } template <typename Dtype> 
__global__ void calc_rd_gpu_kernel( const int num_kernels, const Dtype* const dep_data, const Dtype* const norm_data, const Dtype* const centroid_data, const Dtype* const g_data, Dtype* r_data, Dtype* d_data, const int num, const int channels, const int height, const int width, const int im_height, const int im_width, const Dtype w1, const Dtype w2, const Dtype w3, const Dtype theta, const Dtype focal, const bool use_gradient){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height / width; const int h = (index / width) % height; const int w = index % width; if (w >= h){ return; } const int sp_plan = height; const int d_stride = height * width; const int feature1_idx = (n*channels+0)*sp_plan+h; const int feature2_idx = (n*channels+0)*sp_plan+w; const int top_idx1 = ((n*height)+h)*width+w; const int top_idx2 = ((n*height)+w)*width+h; const int coord_idx1 = ((n + 0) * sp_plan + h) * 2; const int coord_idx2 = ((n + 0) * sp_plan + w) * 2; const int d_idx1 = (n * 2 * height + h) * width + w; const int d_idx2 = (n * 2 * height + w) * width + h; const int dep_idx1 = n*height+h; const int dep_idx2 = n*height+w; const Dtype* coord_data1 = centroid_data + coord_idx1; const Dtype* coord_data2 = centroid_data + coord_idx2; Dtype dot = 0; Dtype feature1_norm = 1; Dtype feature2_norm = 1; if (use_gradient){ // Convert the gradient to surface normal const Dtype data1_dx = norm_data[feature1_idx]; const Dtype data1_dy = norm_data[feature1_idx + sp_plan]; const Dtype data2_dx = norm_data[feature2_idx]; const Dtype data2_dy = norm_data[feature2_idx + sp_plan]; const Dtype data1_z = 1; const Dtype data1_x = - data1_z * data1_dx; const Dtype data1_y = - data1_z * data1_dy; const Dtype data2_z = 1; const Dtype data2_x = - data2_z * data2_dx; const Dtype data2_y = - data2_z * data2_dy; dot = data1_x * data2_x + data1_y * data2_y + data1_z + data2_z; feature1_norm = sqrt(data1_x * data1_x + data1_y * data1_y + data1_z + data1_z); feature2_norm = sqrt(data2_x * data2_x + data2_y * data2_y + data2_z + data2_z); }else{ // Calc the angle between two feature vectors // Calc the L2 normal of the given two vectors feature1_norm = sqrt(dot_stride2(channels, norm_data+feature1_idx, sp_plan, norm_data+feature1_idx, sp_plan)); feature2_norm = sqrt(dot_stride2(channels, norm_data+feature2_idx, sp_plan, norm_data+feature2_idx, sp_plan)); // Calc the dot between the two feature vector dot = dot_stride2(channels, norm_data+feature1_idx, sp_plan, norm_data+feature2_idx, sp_plan); } // Calc the angle between the vector Dtype cos_ang; if (feature1_norm == 0 || feature2_norm == 0){ cos_ang = 0; }else{ cos_ang = min(max(dot / feature1_norm / feature2_norm, Dtype(-1)), Dtype(1)); } // Apply the theta regulation if (cos_ang < theta){ cos_ang = 0; } // Calc the distance between two points Dtype distsq = calc_distsq<Dtype>(coord_data1, coord_data2, im_height, im_width); // The larger means the less regulation cos_ang = Dtype(1) - cos_ang; const Dtype height1 = coord_data1[0]; const Dtype width1 = coord_data1[1]; const Dtype height2 = coord_data2[0]; const Dtype width2 = coord_data2[1]; const Dtype dh = (height2 - height1) / focal; const Dtype dw = (width2 - width1) / focal; d_data[d_idx1] = dh; d_data[d_idx1+d_stride] = dw; d_data[d_idx2] = -dh; d_data[d_idx2+d_stride] = -dw; // Calc the project depth diff const Dtype g1y = g_data[(n * 2 + 0)* height + h]; const Dtype g1x = g_data[(n * 2 + 1)* height + h]; const Dtype g2y = g_data[(n * 2 + 0)* height + w]; const Dtype g2x = g_data[(n * 2 + 1)* height + w]; const Dtype dep1 
= dep_data[dep_idx1]; const Dtype dep2 = dep_data[dep_idx2]; const Dtype dep1_proj = dep2 - dep2 * (dh * g2y + dw * g2x); const Dtype dep2_proj = dep1 + dep1 * (dh * g1y + dw * g1x); // The distance between two superpixel (focal normalized) const Dtype dist = sqrt(dh * dh + dw * dw); const Dtype proj_diff = (fabs(dep1_proj - dep1) + fabs(dep2_proj - dep2)) / dist; // Set the R data r_data[top_idx1] = -cos_ang * w1 - distsq * w2 - proj_diff * w3; r_data[top_idx2] = r_data[top_idx1]; } } template<typename Dtype> __global__ void calc_a_kmn_gpu_kernel( const int num_kernels, Dtype* a_data, const Dtype* const r_data, const Dtype* const d_data, const Dtype* const g_data, const int num, const int height, const int width){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height; const int h = index % height; // The channels is 1 // The basic index const int ar_idx = (n * height + h) * width; const int d_idx = (n * 2 * height + h) * width; const int g_idx = n * 2 * height; const int d_stride = height * width; const int g_stride = height; // A = A - K // K_ij = 0.5 * R_ij * (D_ij * G_i + D_ji * G_j) // K_ij = 0.5 * R_ij * (D_ij * G_i - D_ij * G_j) // A = A + M + N // M_ii = sigma_j (R_ij * D_ij * G_i) // N_ii = 0.5 * sigma_j (R_ij * (D_ij * G_i)^2 ) // Iter the width (superpixel num) for (int w = 0; w < width; w++){ Dtype k = 0.5 * r_data[ar_idx+w] * (d_data[d_idx+w] * g_data[g_idx+h] + d_data[d_idx+w+d_stride] * g_data[g_idx+h+g_stride] - d_data[d_idx+w] * g_data[g_idx+w] - d_data[d_idx+w+d_stride] * g_data[g_idx+w+g_stride]); a_data[ar_idx+w] = a_data[ar_idx+w] - k; Dtype val = d_data[d_idx+w] * g_data[g_idx+h] + d_data[d_idx+w+d_stride] * g_data[g_idx+h+g_stride]; a_data[ar_idx+h] += r_data[ar_idx+w] * val + 0.5 * r_data[ar_idx+w] * val * val; } } } template <typename Dtype> __global__ void calc_g_gpu_kernel( const int num_kernels, const Dtype* const bottom_data, Dtype* g_data, const int num, const int height){ CUDA_KERNEL_LOOP(index, num_kernels){ const int n = index / height; const int h = index % height; // The basic index of the normal map const int norm_idx = n * 3 * height + h; // The basic index of the g const int g_idx = n * 2 * height + h; const Dtype x = bottom_data[norm_idx]; const Dtype y = bottom_data[norm_idx + height]; const Dtype z = bottom_data[norm_idx + 2 * height]; Dtype dx = - x / z; Dtype dy = - y / z; if (fabs(z) < 0.1){ dx = 0; dy = 0; } g_data[g_idx] = dy; g_data[g_idx + height] = dx; } } template <typename Dtype> __global__ void calc_a_gpu_kernel(const int n, Dtype* a_data, const int num, const int channels, const int height, const int width, const Dtype* r_data){ CUDA_KERNEL_LOOP(index, n) { const int n_idx = index / height / channels; const int c = (index / height) % channels; const int h = index % height; const int idx = ((n_idx*channels+c)*height+h)*width; // Calc the sum of the row in r_data Dtype sum = 0; for (int i = 0; i < width; i++){ sum += r_data[idx+i]; } a_data[idx+h] = sum + 1; } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_A_gpu(void){ // A = I + D -R // Set A to 0 caffe_gpu_set(A_.count(), Dtype(0), A_.mutable_gpu_data()); const int num = A_.num(); const int channels = A_.channels(); const int height = A_.height(); const int width = A_.width(); const Dtype* r_data = R_.gpu_data(); Dtype* a_data = A_.mutable_gpu_data(); // A = I + D // kernel num: n*c*h int num_kernels = num * channels * height; calc_a_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, a_data, num, channels, height, 
width, r_data); CUDA_POST_KERNEL_CHECK; // A = A - R caffe_gpu_axpy(A_.count(), Dtype(-1), r_data, a_data); // If the surface normal guidance is disabled, return directly if (disable_normal_guidance_) return; // A = A - K // K_ij = 0.5 * R_ij * (D_ij * G_i + D_ji * G_j) // K_ij = 0.5 * R_ij * (D_ij * G_i - D_ij * G_j) // A = A + M + N // M_ii = sigma_j (R_ij * D_ij * G_i) // N_ii = 0.5 * sigma_j (R_ij * (D_ij * G_i)^2 ) const Dtype* d_data = D_.gpu_data(); const Dtype* g_data = G_.gpu_data(); // The kernel number is n * height num_kernels = num * height; calc_a_kmn_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(num_kernels, a_data, r_data, d_data, g_data, num, height, width); CUDA_POST_KERNEL_CHECK; // If in scale invariant mode // A = A - Q if (unary_mode_ == ScaleInvariant){ const Dtype* q_data = Q_.gpu_data(); caffe_gpu_sub(A_.count(), a_data, q_data, a_data); } } /* Formulate the R matrix, which can be calculated from * bottom[2] normal prediction * bottom[3] centroid coordinates * bottom[4] superpixel appearance [TODO] * * The R is a combination of three parts: * 1. The cosine distance of surface normals, which is 1 - cos(angle) * 2. The normalized distance between two superpixels * 3. The cosine distance between appearance vectors [optional] * * The overall R is: * exp(-w1*M1 - w2*M2 - w3*M3) * * Formulate the D matrix * Which is [n, 2, superpixel_num_, superpixel_num_] */ template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_RD_gpu(const vector<Blob<Dtype>*>& bottom){ const Blob<Dtype>* norm_bottom = bottom[2]; const Blob<Dtype>* centroid_bottom = bottom[3]; const Blob<Dtype>* dep_bottom = bottom[0]; const Dtype* norm_data = norm_bottom->gpu_data(); const Dtype* centroid_data = centroid_bottom->gpu_data(); const Dtype* g_data = G_.gpu_data(); const Dtype* dep_data = dep_bottom->gpu_data(); Dtype* r_data = R_.mutable_gpu_data(); Dtype* d_data = D_.mutable_gpu_data(); const int num = R_.num(); const int channels = norm_bottom->channels(); const int height = R_.height(); const int width = R_.width(); CHECK_EQ(height, width); // Clear the R caffe_gpu_set(R_.count(), Dtype(0), r_data); // Clear D caffe_gpu_set(D_.count(), Dtype(0), d_data); // The kernel number is num * height * width // TODO: Since the top is a symmetric matrix, half of the calculation is not // necessary.
There might be a better method to assign the threads to the pixels const int num_kernels = num * height * width; calc_rd_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, dep_data, norm_data, centroid_data, g_data, r_data, d_data, num, channels, height, width, height_, width_, w1_, w2_, w3_, theta_, f_, use_gradient_); CUDA_POST_KERNEL_CHECK; // exp caffe_gpu_exp(R_.count(), r_data, r_data); // alpha the weight caffe_gpu_scal(R_.count(), Dtype(alpha_), r_data); } /* * Calc the G matrix according to the normal map * The bottom shape is [n, 3, superpixel_num_, 1] * The shape of G is [n, 2, superpixel_num_, 1] */ template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_G_gpu(const Blob<Dtype>* bottom){ Dtype* g_data = G_.mutable_gpu_data(); const Dtype* bottom_data = bottom->gpu_data(); if (use_gradient_){ CHECK_EQ(bottom->count(), G_.count()); caffe_copy(G_.count(), bottom_data, g_data); return; } // Set the G to zeros caffe_gpu_set(G_.count(), Dtype(0), g_data); const int num = G_.num(); const int height = G_.height(); CHECK_EQ(height, superpixel_num_); // The kernel number is num * height const int num_kernels = num * height; calc_g_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, bottom_data, g_data, num, height); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Euclidean_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ const Dtype* gt_data = gt->gpu_data(); const Dtype* pred_data = Pred_.gpu_data(); const int count = Pred_.count(); caffe_gpu_sub( count, pred_data, gt_data, Pred_.mutable_gpu_diff()); Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / count / Dtype(2); top->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::ScaleInvariant_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ CHECK_EQ(gt->channels(), 1); int count = gt->count(); caffe_gpu_sub( count, Pred_.gpu_data(), gt->gpu_data(), Pred_.mutable_gpu_diff()); Dtype* data_diff = Pred_.mutable_gpu_diff(); Dtype* vecSum_data = vecSum_.mutable_cpu_data(); const Dtype* data_label = gt->gpu_data(); const int num = gt->num(); const int channels = gt->channels(); const int height = gt->height(); const int width = gt->width(); // Set the number of the kernel] const int num_kernels = num * height; // Set the bad_pixel_ buffer to 0 Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data(); caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data); // Find the bad pixel and alter the diff if (has_min_label_ || has_max_label_){ Forward_scaleinvariant_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_label, data_diff, bad_pixel_data, num, channels, height, width, max_label_, min_label_); CUDA_POST_KERNEL_CHECK; } // The pixel number per image Dtype pixel_num = gt->count(1); // Calc the each image's valid pixel number in minibatch /* for (int n = 0; n < diff_.num(); n++){ if(is_adjust_pixel_num_){ Dtype val; int offset = bad_pixel_.offset(n); caffe_gpu_asum(height, bad_pixel_data + offset, &val); vecValidPixelNum_data[n] = pixel_num - val; }else{ vecValidPixelNum_data[n] = pixel_num; } } */ Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / count / Dtype(2); // Calc the second term of the loss for (int n = 0; n < gt->num(); n++){ const Dtype* cdata_diff = Pred_.cpu_diff() + Pred_.offset(n); Dtype valid_num = pixel_num; Dtype vecSum = 
caffe_cpu_sum(pixel_num, cdata_diff); vecSum_data[n] = vecSum; loss -= vecSum_data[n] * vecSum_data[n] / valid_num / valid_num / gt->num() * delta_ / Dtype(2); } top->mutable_cpu_data()[0] = loss; // DLOG(INFO) << "valid pixel num:" << valid_pixel_num_ <<" Loss:" << loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Berhu_loss_gpu(const Blob<Dtype>* gt, Blob<Dtype>* top){ const int count = Pred_.count(); const Dtype* label_data = gt->gpu_data(); const Dtype* pred_data = Pred_.gpu_data(); caffe_gpu_sub( count, pred_data, label_data, Pred_.mutable_gpu_diff()); Dtype max_diff = 0; switch(c_rate_mode_){ case MAX: // Get the abs max diff to determine the C max_diff = caffe_gpu_amax(count, Pred_.gpu_diff(), 1); // Calc the Threshold C break; case AVE: // Calc the mean of the abs diff caffe_gpu_asum(count, Pred_.gpu_diff(), &max_diff); max_diff = max_diff / count; break; default: LOG(FATAL) << "False c_rate_mode"; break; } Dtype C = fabs(max_diff * c_rate_); Dtype* data_diff = Pred_.mutable_gpu_diff(); // const Dtype* data_pred = bottom[0]->cpu_data(); const int num = Pred_.num(); const int channels = Pred_.channels(); const int height = Pred_.height(); const int width = Pred_.width(); // The number of kernel is num * height, process a row each time const int num_kernels = num * height; // Set the bad_pixel_ buffer to zero Dtype* bad_pixel_data = bad_pixel_.mutable_gpu_data(); caffe_gpu_set(bad_pixel_.count(), Dtype(0), bad_pixel_data); // Find the bad pixel and alter the diff Forward_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, label_data, data_diff, bad_pixel_data, num, channels, height, width, has_max_label_, has_min_label_, max_label_, min_label_, C); CUDA_POST_KERNEL_CHECK; Dtype bad_pixel_count; caffe_gpu_asum(bad_pixel_.count(), bad_pixel_data, &bad_pixel_count); Dtype dot; caffe_gpu_dot(count, Pred_.gpu_diff(), Pred_.gpu_diff(), &dot); Dtype loss = dot / Dtype(2) / (count-bad_pixel_count); top->mutable_cpu_data()[0] = loss; } template <typename Dtype> void CrfNormLossLayer<Dtype>::Inference_gpu(const Blob<Dtype>* Z){ const int dim = A_inv_.height(); const Dtype* a_data = A_inv_.gpu_data(); const Dtype* z_data = Z->gpu_data(); Dtype* pred_data = Pred_.mutable_gpu_data(); for (int n = 0; n < A_inv_.num(); n++){ const Dtype* a_data_n = a_data + A_inv_.offset(n); for (int c = 0; c < Z->channels(); c++){ const Dtype* z_data_nc = z_data + Z->offset(n, c); Dtype* pred_data_nc = pred_data + Pred_.offset(n, c); caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), a_data_n, z_data_nc, Dtype(0), pred_data_nc); } } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top){ // Generate the G // NOTE: The G must be calc before the R Calc_G_gpu(bottom[2]); // Generate the R and D Calc_RD_gpu(bottom); // Init the QP for scale invariant mode Init_QP_gpu(); // Calc the A matrix Calc_A_gpu(); // Calc the A_inv matrix // ---- DEBUG ----- // test the speed // time_t start, end; //runstart = clock(); Calc_A_inv_gpu(); //end = clock(); // LOG(INFO)<<"TIME: "<<(Dtype)(end-start)/CLOCKS_PER_SEC; // ---- DEBUG ----- // ----DEBUG ---- /* Blob<Dtype> tmp; int dim = A_.height(); tmp.Reshape(1,1,A_.height(), A_.width()); for (int n = 0; n < A_.num(); n++){ caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, dim, dim, dim, Dtype(1), A_.cpu_data()+A_.offset(n), A_inv_.cpu_data()+A_inv_.offset(n), Dtype(0), tmp.mutable_cpu_data()); LOG(INFO)<<"Det(S): "<<caffe_cpu_det(A_.height(), 
tmp.cpu_data()); } */ // Inference the crf switch (unary_mode_){ case ScaleInvariant: Inference_scaleinvariant_gpu(bottom[0]); break; default: Inference_gpu(bottom[0]); break; } // Copy the result if needed if (top.size() == 2){ caffe_copy(Pred_.count(), Pred_.gpu_data(), top[1]->mutable_gpu_data()); } // Calc the loss according to the Pred_ and the bottom[0] switch (unary_mode_){ case L2: Euclidean_loss_gpu(bottom[1], top[0]); break; case Berhu: Berhu_loss_gpu(bottom[1], top[0]); break; case ScaleInvariant: ScaleInvariant_loss_gpu(bottom[1], top[0]); break; default: LOG(FATAL)<<"Unknow unary_mode_ in CrfNormLossLayer"; break; } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // The BP will performed on the bottom[0] and bottom[2] const Dtype loss_weight = top[0]->cpu_diff()[0]; Dtype beta; if (normalize_){ beta = loss_weight / bottom[0]->count(); }else{ beta = loss_weight / bottom[0]->num(); } // BP for bottom[0] if (unary_mode_ == ScaleInvariant){ caffe_gpu_axpby( bottom[0]->count(), beta, Pred_.cpu_diff(), Dtype(0), buf_.mutable_gpu_data()); // In scale invariant mode, the BP should be P*A_inv_*P*Z - P*Y // diff = A_inv_*P*Z - Y, so the BP should be // P * diff const Dtype* p_data = P_.gpu_data(); const Dtype* buf_data = buf_.gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); for (int n = 0; n < bottom[0]->num(); n++){ const Dtype* p_data_n = p_data + P_.offset(n); for (int c = 0; c < bottom[0]->channels(); c++){ const Dtype* buf_data_nc = buf_data + buf_.offset(n, c); Dtype* bottom_diff_nc = bottom_diff + bottom[0]->offset(n, c); caffe_gpu_gemv(CblasNoTrans, P_.height(), P_.height(), Dtype(1), p_data_n, buf_data_nc, Dtype(0), bottom_diff_nc); } } }else{ // Other modes caffe_gpu_axpby( bottom[0]->count(), beta, Pred_.gpu_diff(), Dtype(0), bottom[0]->mutable_gpu_diff()); } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Calc_A_inv_gpu(void){ const int num = A_.num(); const Dtype* a_data = A_.gpu_data(); Dtype* a_inv_data = A_inv_.mutable_gpu_data(); const int height = A_.height(); caffe_gpu_inv(height, num, a_data, a_inv_data); } template <typename Dtype> void CrfNormLossLayer<Dtype>::Init_QP_gpu(void){ if (unary_mode_ != ScaleInvariant) return; const Dtype val = delta_ / superpixel_num_; // Set the Q matrix Dtype* q_data = Q_.mutable_gpu_data(); caffe_gpu_set(Q_.count(), val, q_data); // Set the P matrix // P = I - Q Dtype* p_data = P_.mutable_gpu_data(); caffe_gpu_set(P_.count(), Dtype(0), p_data); caffe_gpu_sub(P_.count(), p_data, q_data, p_data); Dtype* p_data_cpu = P_.mutable_cpu_data(); for (int n = 0; n < P_.num(); n++){ for (int i = 0; i < P_.height(); i++){ p_data_cpu[P_.offset(n, 0, i, i)] += Dtype(1); } } } template <typename Dtype> void CrfNormLossLayer<Dtype>::Inference_scaleinvariant_gpu(const Blob<Dtype>* Z){ // pred = A_inv_ * P * Z const int dim = A_inv_.height(); const Dtype* a_data = A_inv_.gpu_data(); const Dtype* z_data = Z->gpu_data(); const Dtype* p_data = P_.gpu_data(); // Creat a buffer to make sure the blas safe Dtype* buf_data = buf_.mutable_gpu_data(); Dtype* pred_data = Pred_.mutable_gpu_data(); for (int n = 0; n < A_inv_.num(); n++){ const Dtype* a_data_n = a_data + A_inv_.offset(n); const Dtype* p_data_n = p_data + P_.offset(n); for (int c = 0; c < Z->channels(); c++){ const Dtype* z_data_nc = z_data + Z->offset(n, c); Dtype* buf_data_nc = buf_data + buf_.offset(n, c); Dtype* pred_data_nc = pred_data + 
Pred_.offset(n, c); // buf = P * Z caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), p_data_n, z_data_nc, Dtype(0), buf_data_nc); // pred = A_inv_ * pred caffe_gpu_gemv(CblasNoTrans, dim, dim, Dtype(1), a_data_n, buf_data_nc, Dtype(0), pred_data_nc); } } } template void CrfNormLossLayer<float>::Calc_A_gpu(void); template void CrfNormLossLayer<double>::Calc_A_gpu(void); template void CrfNormLossLayer<float>::Calc_RD_gpu(const vector<Blob<float>*>& bottom); template void CrfNormLossLayer<double>::Calc_RD_gpu(const vector<Blob<double>*>& bottom); template void CrfNormLossLayer<float>::Calc_G_gpu(const Blob<float>* bottom); template void CrfNormLossLayer<double>::Calc_G_gpu(const Blob<double>* bottom); template void CrfNormLossLayer<float>::Euclidean_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::Euclidean_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::Berhu_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::Berhu_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::ScaleInvariant_loss_gpu(const Blob<float>* gt, Blob<float>* top); template void CrfNormLossLayer<double>::ScaleInvariant_loss_gpu(const Blob<double>* gt, Blob<double>* top); template void CrfNormLossLayer<float>::Inference_gpu(const Blob<float>* Z); template void CrfNormLossLayer<double>::Inference_gpu(const Blob<double>* Z); template void CrfNormLossLayer<float>::Calc_A_inv_gpu(void); template void CrfNormLossLayer<double>::Calc_A_inv_gpu(void); template void CrfNormLossLayer<float>::Init_QP_gpu(void); template void CrfNormLossLayer<double>::Init_QP_gpu(void); template void CrfNormLossLayer<float>::Inference_scaleinvariant_gpu(const Blob<float>* Z); template void CrfNormLossLayer<double>::Inference_scaleinvariant_gpu(const Blob<double>* Z); INSTANTIATE_LAYER_GPU_FUNCS(CrfNormLossLayer); } // namespace caffe
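// --- Illustrative sketch (not part of the original layer) --------------------
// ScaleInvariant_loss_gpu above computes, for the per-pixel diff d = pred - gt,
//   L = (1 / (2 * count)) * sum_i d_i^2
//       - (delta / (2 * num)) * sum_n ( (sum_{i in image n} d_i) / pixels )^2,
// i.e. an Eigen-style scale-invariant depth error; the second term is
// accumulated on the host from Pred_.cpu_diff() via caffe_cpu_sum.  The sketch
// below evaluates the same expression for a toy batch, ignoring the
// min/max-label masking.  The name scale_invariant_loss is hypothetical.
#include <vector>
#include <cstdio>

static double scale_invariant_loss(const std::vector<double>& pred,
                                   const std::vector<double>& gt,
                                   int num, int pixels, double delta) {
  const int count = num * pixels;
  double sq = 0.0, second = 0.0;
  for (int n = 0; n < num; ++n) {
    double vec_sum = 0.0;
    for (int i = 0; i < pixels; ++i) {
      const double d = pred[n * pixels + i] - gt[n * pixels + i];
      sq += d * d;
      vec_sum += d;
    }
    // (sum d)^2 / pixels^2 == (mean d)^2, as in the per-image loop above.
    second += vec_sum * vec_sum / pixels / pixels;
  }
  return sq / count / 2.0 - second / num * delta / 2.0;
}

int main() {
  std::vector<double> pred = {1.0, 1.2, 0.9, 2.1, 1.9, 2.0};  // 2 images x 3 px
  std::vector<double> gt   = {1.1, 1.0, 1.0, 2.0, 2.0, 2.2};
  std::printf("loss = %f\n", scale_invariant_loss(pred, gt, 2, 3, 0.5));
  return 0;
}
// ------------------------------------------------------------------------------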
722273d3d6c46ac20901b7d4edfba77b60859c01.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t cuTransB = (TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T; CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { hipblasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
HIPBLAS_OP_T : HIPBLAS_OP_N; CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } // X: src, Y: des void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { // copy X to Y CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( 
add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void 
caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, 
mu, sigma)); } } // namespace caffe
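// --- Illustrative sketch (not part of the original file) ---------------------
// caffe_gpu_axpby above is built from two BLAS calls: Y := beta * Y
// (caffe_gpu_scal) followed by Y := alpha * X + Y (caffe_gpu_axpy), i.e. the
// usual y = alpha*x + beta*y update composed from existing hipBLAS routines
// rather than a fused kernel (so Y is read and written twice).  The host-only
// check below reproduces that composition on plain vectors; the name axpby_ref
// is hypothetical and used only here.
#include <vector>
#include <cstdio>

static void axpby_ref(float alpha, const std::vector<float>& x,
                      float beta, std::vector<float>& y) {
  for (float& v : y) v *= beta;                                // scal step
  for (size_t i = 0; i < y.size(); ++i) y[i] += alpha * x[i];  // axpy step
}

int main() {
  std::vector<float> x = {1.f, 2.f, 3.f};
  std::vector<float> y = {10.f, 10.f, 10.f};
  axpby_ref(2.f, x, 0.5f, y);  // y = 2*x + 0.5*y
  std::printf("%f %f %f (expect 7 9 11)\n", y[0], y[1], y[2]);
  return 0;
}
// ------------------------------------------------------------------------------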
722273d3d6c46ac20901b7d4edfba77b60859c01.cu
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit #include <thrust/device_vector.h> #include <thrust/functional.h> // thrust::plus #include <thrust/reduce.h> #include <cmath> #include "caffe/common.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <> void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const double alpha, const double* A, const double* B, const double beta, double* C) { // Note that cublas follows fortran order. int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, N)); } template <> void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const double alpha, const double* A, const double* x, const double beta, double* y) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X, float* Y) { CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } template <> void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X, double* Y) { CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } // X: src, Y: des void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { // copy X to Y CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) } } template <> void caffe_gpu_scal<float>(const int N, const float alpha, float *X) { CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_scal<double>(const int N, const double alpha, double *X) { CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1)); } template <> void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal<float>(N, beta, Y); caffe_gpu_axpy<float>(N, alpha, X, Y); } template <> void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X, const double beta, double* Y) { caffe_gpu_scal<double>(N, beta, Y); caffe_gpu_axpy<double>(N, alpha, X, Y); } template <> void caffe_gpu_dot<float>(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_dot<double>(const int n, const double* x, const double* y, double * out) { CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); } template <> void caffe_gpu_asum<float>(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_asum<double>(const int n, const double* x, double* y) { CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y)); } template <> void caffe_gpu_scale<float>(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <> void caffe_gpu_scale<double>(const int n, const double alpha, const double *x, double* y) { CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1)); CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1)); } template <typename Dtype> __global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = alpha; } } template <typename Dtype> void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) { if (alpha == 0) { CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn) return; } // NOLINT_NEXT_LINE(whitespace/operators) set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template void caffe_gpu_set<int>(const int N, const int alpha, int* Y); template void caffe_gpu_set<float>(const int N, const float alpha, float* Y); template void caffe_gpu_set<double>(const int N, const double alpha, double* Y); template <typename Dtype> __global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] += alpha; } } template <> void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); 
} template <> void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) { // NOLINT_NEXT_LINE(whitespace/operators) add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, alpha, Y); } template <typename Dtype> __global__ void add_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] + b[index]; } } template <> void caffe_gpu_add<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_add<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void sub_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] - b[index]; } } template <> void caffe_gpu_sub<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_sub<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void mul_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] * b[index]; } } template <> void caffe_gpu_mul<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_mul<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void div_kernel(const int n, const Dtype* a, const Dtype* b, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = a[index] / b[index]; } } template <> void caffe_gpu_div<float>(const int N, const float* a, const float* b, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <> void caffe_gpu_div<double>(const int N, const double* a, const double* b, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, b, y); } template <typename Dtype> __global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = abs(a[index]); } } template <> void caffe_gpu_abs<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_abs<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = exp(a[index]); } } template <> void caffe_gpu_exp<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) 
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_exp<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void log_kernel(const int n, const Dtype* a, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = log(a[index]); } } template <> void caffe_gpu_log<float>(const int N, const float* a, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <> void caffe_gpu_log<double>(const int N, const double* a, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, y); } template <typename Dtype> __global__ void powx_kernel(const int n, const Dtype* a, const Dtype alpha, Dtype* y) { CUDA_KERNEL_LOOP(index, n) { y[index] = pow(a[index], alpha); } } template <> void caffe_gpu_powx<float>(const int N, const float* a, const float alpha, float* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } template <> void caffe_gpu_powx<double>(const int N, const double* a, const double alpha, double* y) { // NOLINT_NEXT_LINE(whitespace/operators) powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>( N, a, alpha, y); } DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index]) - (x[index] < Dtype(0))); DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index])); void caffe_gpu_rng_uniform(const int n, unsigned int* r) { CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n)); } template <> void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b, float* r) { CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n)); const float range = b - a; if (range != static_cast<float>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<float>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b, double* r) { CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n)); const double range = b - a; if (range != static_cast<double>(1)) { caffe_gpu_scal(n, range, r); } if (a != static_cast<double>(0)) { caffe_gpu_add_scalar(n, a, r); } } template <> void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma, float* r) { CURAND_CHECK( curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma)); } template <> void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma, double* r) { CURAND_CHECK( curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma)); } } // namespace caffe
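For orientation, here is a minimal host-side sketch of how the caffe_gpu_* wrappers defined above compose. It assumes the Caffe singleton has already created its cuBLAS handle and that d_x and d_y are device buffers of length n; the helper name and buffer names are illustrative, not part of the original file.

#include "caffe/util/math_functions.hpp"  // declares the wrappers defined above

// Hypothetical helper: y = 2*x + 0.5*y, then dot = x . y, all on the GPU.
void example_blas_usage(const float* d_x, float* d_y, int n) {
  caffe::caffe_gpu_axpby<float>(n, 2.0f, d_x, 0.5f, d_y);  // scal followed by axpy, as defined above
  float dot = 0.0f;
  caffe::caffe_gpu_dot<float>(n, d_x, d_y, &dot);          // cublasSdot writes the result to the host scalar
}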
d9ea1bc1518899e03b624ae76432b66977b804de.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ---- // ---- Computes the potential field for a volume // ---- Input: volume file, dimensions: X, Y, Z, output file name // ---- Output: normalized potential field: // 1 vector for each point in the volume // // Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea // // // #define TRACE #include "potVect.h" #include <thrust/sort.h> #define BOUND_SIZE 1200000 struct compareStruct { __host__ __device__ bool operator()(VoxelPosition a, VoxelPosition b) { if(a.z != b.z) return a.z < b.z; else if(a.y != b.y) return a.y < b.y; else return a.x < b.x; } }; bool GetIndexOfBPInXYZRange( short sx, short sy, short sz, short ex, short ey, short ez, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex); bool GetIndexOfBPInZRange( short z1, short z2, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex); bool GetIndexOfBPInYRange( short y1, short y2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex); bool GetIndexOfBPInXRange( short x1, short x2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex); bool SortBoundaryArray(int numBound, VoxelPosition Bound[]); bool SortByX(int startAt, int endAt, VoxelPosition Bound[]); bool SortByY(int startAt, int endAt, VoxelPosition Bound[]); bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]); __global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght) { int k=blockIdx.x; int j=blockIdx.y; int i=threadIdx.x; int zStartIndex = 0; int zEndIndex = numBound- 1; int s; for (s = 0; s < numBound; s++) { if((k - Bound[s].z) <= PF_THRESHOLD) { zStartIndex = s; break; } } for (s = numBound-1; s >= zStartIndex; s--) { if((Bound[s].z - k) <= PF_THRESHOLD) { zEndIndex = s; break; } } int yStartIndex = zStartIndex; int yEndIndex = zEndIndex; for (s = zStartIndex; s <= zEndIndex; s++) { if((j - Bound[s].y) <= PF_THRESHOLD) { yStartIndex = s; break; } } for (s = zEndIndex; s >= yStartIndex; s--) { if((Bound[s].y - j) <= PF_THRESHOLD) { yEndIndex = s; break; } } int idx=k*slsz + j*L + i; force[idx].xd = 0.00; force[idx].yd = 0.00; force[idx].zd = 0.00; if(!inOut) { if(f[idx] == 0) { // outside voxels have null force return; } } else { // we don't know where the inside of the object is // so we compute the vector field everywhere. 
// NOTHING } if(f[idx] == SURF) return; if(f[idx] == BOUNDARY) return; int startIndex = yStartIndex; int endIndex = yEndIndex; for (s = yStartIndex; s <= yEndIndex; s++) { if((i - Bound[s].x) <= PF_THRESHOLD) { startIndex = s; break; } } for (s = yEndIndex; s >= startIndex; s--) { if((Bound[s].x - i) <= PF_THRESHOLD) { endIndex = s; break; } } if(endIndex < startIndex) { // no boundary point is close enough to this point - take all the boundary points startIndex = 0; endIndex = numBound - 1; } for (s = startIndex; s <= endIndex; s++) { // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z); /* // visibility test - too slow if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, Bound, numBound, &v1, &v2)) { // check if this boundary pont is visible from the current position if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) { // not visible continue; } } */ float v1 = i - Bound[s].x; float v2 = j - Bound[s].y; float v3 = k - Bound[s].z; float r, t; #ifdef EUCLIDEAN_METRIC // euclidean metric r = sqrtf(v1*v1 + v2*v2 + v3*v3); #else // simpler metric r = abs(v1) + abs(v2) + abs(v3); #endif // r CAN BE 0 if we are computing the force // at boundary voxels too // if the current point is a BOUNDARY point, // some r will be 0, and that should be // ignored if(r != 0.00) { // raise r to the fieldStrenght+1 power // so that the force is // 1/(dist^fieldStrength) t = 1.00; for(int p = 0; p <= fieldStrenght; p++) { t = t * r; } r = t; force[idx].xd = force[idx].xd + (v1 / r); force[idx].yd = force[idx].yd + (v2 / r); force[idx].zd = force[idx].zd + (v3 / r); } } } bool CalculatePotentialField( int L, int M, int N, // [in] size of volume unsigned char* f, // [in] volume flags int fieldStrenght, // [in] potential field strenght Vector* force, // [out] force field bool inOut // [in] flag indicating that we don't // know what the inside/outside of // the object is. We have only point // samples of the boundary. // DEFAULT: false (only interior) ) { int Lm1, Mm1, Nm1; int i,j,k, s, p; long idx, iidx, slsz, sz; VoxelPosition* Bound; int numBound = 0; bool flagSurf, flagBound; double r, t; int v1, v2, v3; int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex; // // check volume padding - fast version // if(!CheckVolumePadding(f, L, M, N)) { printf("** Error - Object touches bounding box. Abort.\n"); exit(1); } #ifdef _DEBUG printf("\t************ Potential Field calculation parameters: ******************\n"); #ifdef HALF_BOUNDARY_POINTS printf("\t** Using only HALF of the boundary points.\n"); #else printf("\t** Using ALL boundary points.\n"); #endif #ifdef EUCLIDEAN_METRIC printf("\t** Using EUCLIDEAN metric.\n"); #else printf("\t** Using NON EUCLIDEAN metric.\n"); #endif if(inOut) { printf("\t** Inside and Outside.\n"); } else { printf("\t** Inside ONLY.\n"); } printf("\t********* Potential Field calculation parameters - end ****************\n"); #endif if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) { printf("\nERROR allocating memory for boundary array! 
- Abort\n"); exit(1); } Lm1 = L - 1; Mm1 = M - 1; Nm1 = N - 1; slsz = L*M; // slice size sz = slsz*N; // save all the boundary voxels in array Bound[] for (k = 1; k < Nm1; k++) { for (j = 1; j < Mm1; j++) { for (i = 1; i < Lm1; i++) { flagSurf = false; flagBound = true; idx = k*slsz + j*L + i; // CASE 1: treat the inner layer if (f[idx] == 0) continue; //consider six face neighbors, if anyone is zero, it is a boundary voxel iidx = k*slsz + j*L + i-1; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select this one as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + j*L + i+1; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + (j-1)*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + (j+1)*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = (k-1)*slsz + j*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = (k+1)*slsz + j*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif } } } } } // restore idx to the right value idx = k*slsz + j*L + i; if (flagSurf) { f[idx] = SURF; if(flagBound) { // if no neighbour of this voxel is already marked as boundary, then mark this one. // or if we are taking all the boundary voxels // (in this case flagBound stays true) f[idx] = BOUNDARY; Bound[numBound].x = i; Bound[numBound].y = j; Bound[numBound].z = k; numBound++; if(numBound >= BOUND_SIZE) { printf("ERROR: too many boundary points detected !! - Abort.\n"); exit(1); } } } } } } //printf("numBound = %d \n", numBound); #ifdef _DEBUG PrintElapsedTime("\tPF-1: finding the boundary voxels."); printf("\t--Found %d boundary voxels.\n", numBound); #endif /* // output boundary voxels FILE *ff; unsigned char a; long int b; ff=fopen("bound.vol", "w"); for(idx=0; idx < L*M*N; idx++) { if(f[idx] == BOUNDARY) { a = 255; } else { a = 0; } b = random(); if(b > RAND_MAX/2) { a = 0; } fwrite(&a, sizeof(unsigned char), 1, ff); b = 0; } fclose(ff); exit(1); */ // sort the boundary array. 
SortBoundaryArray(numBound, Bound); #ifdef _DEBUG PrintElapsedTime("\tPF-2: sorting the boundary voxels."); #ifdef TRACE // print the boundary voxels for(i=0; i < numBound; i++) { printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z); } exit(1); #endif #endif // Compute the potential field printf("Computing potential field.\n"); dim3 dimBlock(L,1); dim3 dimGrid(N,M); VoxelPosition *d_bound; unsigned char* d_f; Vector* d_force; hipMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N); hipMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE); hipMalloc((void **)&d_force,sizeof(Vector)*L*M*N); hipMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,hipMemcpyHostToDevice); hipMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,hipMemcpyHostToDevice); hipMemcpy(d_force,force,sizeof(Vector)*L*M*N,hipMemcpyHostToDevice); hipLaunchKernelGGL(( compute_potential_field), dim3(dimGrid),dim3(dimBlock), 0, 0, d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght); hipMemcpy(force,d_force,sizeof(Vector)*L*M*N,hipMemcpyDeviceToHost); // idx = -1; // for (k = 0; k < N; k++) { // printf("\tProcessing plane %d out of %d\r", k, N-1); // fflush(stdout); // // find the boundary voxels that will influence this point // // look at the Z coordinate // zStartIndex = 0; // zEndIndex = numBound- 1; // for (s = 0; s < numBound; s++) { // if((k - Bound[s].z) <= PF_THRESHOLD) { // zStartIndex = s; // break; // } // } // for (s = numBound-1; s >= zStartIndex; s--) { // if((Bound[s].z - k) <= PF_THRESHOLD) { // zEndIndex = s; // break; // } // } // // printf("ZStart: %d\t ZEnd: %d\n", zStartIndex, zEndIndex); // for (j = 0; j < M; j++) { // // find the boundary voxels that will influence this point // // look at the Y coordinate // yStartIndex = zStartIndex; // yEndIndex = zEndIndex; // for (s = zStartIndex; s <= zEndIndex; s++) { // if((j - Bound[s].y) <= PF_THRESHOLD) { // yStartIndex = s; // break; // } // } // for (s = zEndIndex; s >= yStartIndex; s--) { // if((Bound[s].y - j) <= PF_THRESHOLD) { // yEndIndex = s; // break; // } // } // // printf("YStart: %d\t YEnd: %d\n", yStartIndex, yEndIndex); // for (i = 0; i < L; i++) { // // printf("Point: %d\t%d\t%d:\n", i, j, k); // // idx = k*slsz + j*L + i; // idx = idx + 1; // force[idx].xd = 0.00; // force[idx].yd = 0.00; // force[idx].zd = 0.00; // if(!inOut) { // if(f[idx] == 0) { // // outside voxels have null force // continue; // } // } // else { // // we don't know where the inside of the object is // // so we compute the vector field everywhere. // // NOTHING // } // // surface voxels (including those selected for the // // field calculation) // // are ignored for now. 
The force there will be // // the average of their neighbors // // if we are to compute the force at boundary // // voxels too, the force will point // // towards the exterior of the object // // (example: a 30x30x100 box) // if(f[idx] == SURF) continue; // if(f[idx] == BOUNDARY) continue; // // find the boundary voxels that will influence this point // // look at the X coordinate // startIndex = yStartIndex; // endIndex = yEndIndex; // for (s = yStartIndex; s <= yEndIndex; s++) { // if((i - Bound[s].x) <= PF_THRESHOLD) { // startIndex = s; // break; // } // } // for (s = yEndIndex; s >= startIndex; s--) { // if((Bound[s].x - i) <= PF_THRESHOLD) { // endIndex = s; // break; // } // } // // printf("Start at: %d, end at: %d\n", startIndex, endIndex); // // exit(-1); // if(endIndex < startIndex) { // // no boundary point is close enough to this point - take all the boundary points // startIndex = 0; // endIndex = numBound - 1; // } // for (s = startIndex; s <= endIndex; s++) { // // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z); // /* // // visibility test - too slow // if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, // Bound, numBound, &v1, &v2)) // { // // check if this boundary pont is visible from the current position // if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) { // // not visible // continue; // } // } // */ // v1 = i - Bound[s].x; // v2 = j - Bound[s].y; // v3 = k - Bound[s].z; // #ifdef EUCLIDEAN_METRIC // // euclidean metric // r = sqrt(v1*v1 + v2*v2 + v3*v3); // #else // // simpler metric // r = abs(v1) + abs(v2) + abs(v3); // #endif // // r CAN BE 0 if we are computing the force // // at boundary voxels too // // if the current point is a BOUNDARY point, // // some r will be 0, and that should be // // ignored // if(r != 0.00) { // // raise r to the fieldStrenght+1 power // // so that the force is // // 1/(dist^fieldStrength) // t = 1.00; // for(p = 0; p <= fieldStrenght; p++) { // t = t * r; // } // r = t; // force[idx].xd = force[idx].xd + (v1 / r); // force[idx].yd = force[idx].yd + (v2 / r); // force[idx].zd = force[idx].zd + (v3 / r); // } // } // /* // printf("First point with force vector != 0\n"); // printf("%f\t%f\t%f: %d, %d, %d\n", force[idx].xd, force[idx].yd, force[idx].zd, i, j, k); // exit(1); // */ // } // } // } // delete the Bound array - don't need it anymore delete [] Bound; #ifdef _DEBUG PrintElapsedTime("\tPF-3: computing potential field for inside voxels."); #endif // normalize force vectors: for(idx=0; idx < L*M*N; idx++) { if(!inOut) { // only for interior voxels we had calculated forces if(f[idx] == EXTERIOR) continue; } r = force[idx].xd*force[idx].xd + force[idx].yd*force[idx].yd + force[idx].zd*force[idx].zd; if(r > 0.00) { r = sqrt(r); force[idx].xd = force[idx].xd / r; force[idx].yd = force[idx].yd / r; force[idx].zd = force[idx].zd / r; } } #ifdef _DEBUG PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels."); #endif // if we know the inside from the outside // calculate the force at the surface voxels as the average of the // interior neighbors if (!inOut) { //neighbors: int ng[26]; // face neighbors ng[0] = + slsz + 0 + 0; ng[1] = - slsz + 0 + 0; ng[2] = + 0 + L + 0; ng[3] = + 0 - L + 0; ng[4] = + 0 + 0 + 1; ng[5] = + 0 + 0 - 1; // v-neighbors ng[6] = - slsz - L - 1; ng[7] = - slsz - L + 1; ng[8] = - slsz + L - 1; ng[9] = - slsz + L + 1; ng[10] = + slsz - L - 1; ng[11] = + slsz - L + 1; ng[12] = + slsz + L - 1; ng[13] = + slsz + L + 1; // 
e-neighbors ng[14] = + slsz + L + 0; ng[15] = + slsz - L + 0; ng[16] = - slsz + L + 0; ng[17] = - slsz - L + 0; ng[18] = + slsz + 0 + 1; ng[19] = + slsz + 0 - 1; ng[20] = - slsz + 0 + 1; ng[21] = - slsz + 0 - 1; ng[22] = + 0 + L + 1; ng[23] = + 0 + L - 1; ng[24] = + 0 - L + 1; ng[25] = + 0 - L - 1; for (k = 1; k < Nm1; k++) { for (j = 1; j < Mm1; j++) { for (i = 1; i < Lm1; i++) { idx = k*slsz + j*L + i; if((f[idx] == SURF) || (f[idx] == BOUNDARY)) { force[idx].xd = 0.00; force[idx].yd = 0.00; force[idx].zd = 0.00; // look at the neighbors and average the forces if not 0 // v1 = 0; for(s=0; s < 26; s++) { iidx = idx + ng[s]; // index of neighbor // take only neighbors that are not SURF or BOUNDARY // because those neighbors have force = 0 if(f[iidx] == SURF) continue; if(f[iidx] == BOUNDARY) continue; // if we know the interior of the object, take only interior // neighbors if(!inOut) { if(f[iidx] == EXTERIOR) continue; } force[idx].xd = force[idx].xd + force[iidx].xd; force[idx].yd = force[idx].yd + force[iidx].yd; force[idx].zd = force[idx].zd + force[iidx].zd; v1 = v1 + 1; } // average if(v1 != 0) { force[idx].xd = force[idx].xd / (double) v1; force[idx].yd = force[idx].yd / (double) v1; force[idx].zd = force[idx].zd / (double) v1; } else { printf("Boundary voxel has no interior neighbor !!! - Force = 0\n"); } // normalize r = force[idx].xd*force[idx].xd + force[idx].yd*force[idx].yd + force[idx].zd*force[idx].zd; if(r > 0.00) { r = sqrt(r); force[idx].xd = force[idx].xd / r; force[idx].yd = force[idx].yd / r; force[idx].zd = force[idx].zd / r; } } } } } } else { // we don't know the inside from the outside. // boundary points remain 0 // nothing to do } #ifdef _DEBUG PrintElapsedTime("\tPF-5: computing potential field for boundary voxels."); #endif return true; } // Sort the boundary array so that we can speed up the potential field calculation: ZYX in that order // selection sort /*bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) { int st, i; short zvst, yvst; // sort by Z SortByZ(0, numBound-1, Bound); // then by Y st = 0; zvst = Bound[st].z; for(i=0; i < numBound; i++) { if(Bound[i].z != zvst) { SortByY(st, i-1, Bound); st = i; zvst = Bound[st].z; } } SortByY(st, numBound-1, Bound); // then by X st = 0; zvst = Bound[st].z; yvst = Bound[st].y; for(i=0; i < numBound; i++) { if((Bound[i].y != yvst) || (Bound[i].z != zvst)) { SortByX(st, i-1, Bound); st = i; zvst = Bound[st].z; yvst = Bound[st].y; } } SortByX(st, numBound-1, Bound); return true; }*/ compareStruct comp; bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) { thrust::sort(Bound, Bound+numBound, comp); return true; } bool SortByX(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].x; for(j=i+1; j <= endAt; j++) { if(Bound[j].x < crtMin) { minIndex = j; crtMin = Bound[j].x; } } if(minIndex != -1) { // swap values. tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } bool SortByY(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].y; for(j=i+1; j <= endAt; j++) { if(Bound[j].y < crtMin) { minIndex = j; crtMin = Bound[j].y; } } if(minIndex != -1) { // swap values. 
tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].z; for(j=i+1; j <= endAt; j++) { if(Bound[j].z < crtMin) { minIndex = j; crtMin = Bound[j].z; } } if(minIndex != -1) { // swap values. tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } // returns the start and endindex of boundary point found in a region // in space bound by a box defined by the 2 points. // it doesn't change startIndex or endIndex if it returns false; // returns true if it finds any boundary point in that region, or false otherwise. bool GetIndexOfBPInXYZRange( short sx, short sy, short sz, short ex, short ey, short ez, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex) { int si1, ei1, si2, ei2; // temporary start and end indexes // if(GetIndexOfBPInZRange(sz, ez, Bound, numBound, &si1, &ei1)) { if(GetIndexOfBPInYRange(sy, ey, Bound, numBound, si1, ei1, &si2, &ei2)) { if(GetIndexOfBPInXRange(sx, ex, Bound, numBound, si2, ei2, &si1, &ei1)) { (*startIndex) = si1; (*endIndex) = ei1; return true; } } } return false; } bool GetIndexOfBPInZRange( short z1, short z2, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex) { short minz, maxz; int s; int si; // sort the 2 z values; if(z1 < z2) { minz = z1; maxz = z2; } else { minz = z2; maxz = z1; } si = -1; for (s = 0; s < numBound; s++) { if((minz - Bound[s].z) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = numBound-1; s >= (*startIndex); s--) { if((Bound[s].z - maxz) < 0) { (*endIndex) = s; break; } } return true; } bool GetIndexOfBPInYRange( short y1, short y2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex) { short miny, maxy; int s; int si; // sort the 2 y values; if(y1 < y2) { miny = y1; maxy = y2; } else { miny = y2; maxy = y1; } // start the search at startAt and end it endAt si = -1; for (s = startAt; s <= endAt; s++) { if((miny - Bound[s].y) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = endAt; s >= (*startIndex); s--) { if((Bound[s].y - maxy) < 0) { (*endIndex) = s; break; } } return true; } bool GetIndexOfBPInXRange( short x1, short x2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex) { short minx, maxx; int s; int si; // sort the 2 x values; if(x1 < x2) { minx = x1; maxx = x2; } else { minx = x2; maxx = x1; } // start the search at startAt and end it endAt si = -1; for (s = startAt; s <= endAt; s++) { if((minx - Bound[s].x) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = endAt; s >= (*startIndex); s--) { if((Bound[s].x - maxx) < 0) { (*endIndex) = s; break; } } return true; }
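One property of the launch configuration used above (dimBlock(L,1) with dimGrid(N,M)) is that the full X extent of the volume becomes the thread-block size, so the kernel cannot launch once L exceeds the device's maximum threads per block. A hedged sketch of a pre-launch check follows; the helper name and message are illustrative and not part of the original code.

#include <hip/hip_runtime.h>
#include <cstdio>

// Hypothetical guard mirroring the dimBlock(L,1) / dimGrid(N,M) shape in CalculatePotentialField.
bool launchShapeFits(int L) {
  hipDeviceProp_t prop;
  if (hipGetDeviceProperties(&prop, 0) != hipSuccess) return false;
  if (L > prop.maxThreadsPerBlock) {
    printf("X dimension %d exceeds maxThreadsPerBlock %d; a tiled launch would be needed.\n",
           L, prop.maxThreadsPerBlock);
    return false;
  }
  return true;
}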
d9ea1bc1518899e03b624ae76432b66977b804de.cu
// ---- // ---- Computes the potential field for a volume // ---- Input: volume file, dimensions: X, Y, Z, output file name // ---- Output: normalized potential field: // 1 vector for each point in the volume // // Last change: Thu May 15 15:20:38 EDT 2003 by Nicu D. Cornea // // // #define TRACE #include "potVect.h" #include <thrust/sort.h> #define BOUND_SIZE 1200000 struct compareStruct { __host__ __device__ bool operator()(VoxelPosition a, VoxelPosition b) { if(a.z != b.z) return a.z < b.z; else if(a.y != b.y) return a.y < b.y; else return a.x < b.x; } }; bool GetIndexOfBPInXYZRange( short sx, short sy, short sz, short ex, short ey, short ez, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex); bool GetIndexOfBPInZRange( short z1, short z2, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex); bool GetIndexOfBPInYRange( short y1, short y2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex); bool GetIndexOfBPInXRange( short x1, short x2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex); bool SortBoundaryArray(int numBound, VoxelPosition Bound[]); bool SortByX(int startAt, int endAt, VoxelPosition Bound[]); bool SortByY(int startAt, int endAt, VoxelPosition Bound[]); bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]); __global__ void compute_potential_field(VoxelPosition *Bound,Vector* force,int numBound,unsigned char* f,bool inOut,int slsz,int sz,int L, int fieldStrenght) { int k=blockIdx.x; int j=blockIdx.y; int i=threadIdx.x; int zStartIndex = 0; int zEndIndex = numBound- 1; int s; for (s = 0; s < numBound; s++) { if((k - Bound[s].z) <= PF_THRESHOLD) { zStartIndex = s; break; } } for (s = numBound-1; s >= zStartIndex; s--) { if((Bound[s].z - k) <= PF_THRESHOLD) { zEndIndex = s; break; } } int yStartIndex = zStartIndex; int yEndIndex = zEndIndex; for (s = zStartIndex; s <= zEndIndex; s++) { if((j - Bound[s].y) <= PF_THRESHOLD) { yStartIndex = s; break; } } for (s = zEndIndex; s >= yStartIndex; s--) { if((Bound[s].y - j) <= PF_THRESHOLD) { yEndIndex = s; break; } } int idx=k*slsz + j*L + i; force[idx].xd = 0.00; force[idx].yd = 0.00; force[idx].zd = 0.00; if(!inOut) { if(f[idx] == 0) { // outside voxels have null force return; } } else { // we don't know where the inside of the object is // so we compute the vector field everywhere. 
// NOTHING } if(f[idx] == SURF) return; if(f[idx] == BOUNDARY) return; int startIndex = yStartIndex; int endIndex = yEndIndex; for (s = yStartIndex; s <= yEndIndex; s++) { if((i - Bound[s].x) <= PF_THRESHOLD) { startIndex = s; break; } } for (s = yEndIndex; s >= startIndex; s--) { if((Bound[s].x - i) <= PF_THRESHOLD) { endIndex = s; break; } } if(endIndex < startIndex) { // no boundary point is close enough to this point - take all the boundary points startIndex = 0; endIndex = numBound - 1; } for (s = startIndex; s <= endIndex; s++) { // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z); /* // visibility test - too slow if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, Bound, numBound, &v1, &v2)) { // check if this boundary pont is visible from the current position if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) { // not visible continue; } } */ float v1 = i - Bound[s].x; float v2 = j - Bound[s].y; float v3 = k - Bound[s].z; float r, t; #ifdef EUCLIDEAN_METRIC // euclidean metric r = sqrtf(v1*v1 + v2*v2 + v3*v3); #else // simpler metric r = abs(v1) + abs(v2) + abs(v3); #endif // r CAN BE 0 if we are computing the force // at boundary voxels too // if the current point is a BOUNDARY point, // some r will be 0, and that should be // ignored if(r != 0.00) { // raise r to the fieldStrenght+1 power // so that the force is // 1/(dist^fieldStrength) t = 1.00; for(int p = 0; p <= fieldStrenght; p++) { t = t * r; } r = t; force[idx].xd = force[idx].xd + (v1 / r); force[idx].yd = force[idx].yd + (v2 / r); force[idx].zd = force[idx].zd + (v3 / r); } } } bool CalculatePotentialField( int L, int M, int N, // [in] size of volume unsigned char* f, // [in] volume flags int fieldStrenght, // [in] potential field strenght Vector* force, // [out] force field bool inOut // [in] flag indicating that we don't // know what the inside/outside of // the object is. We have only point // samples of the boundary. // DEFAULT: false (only interior) ) { int Lm1, Mm1, Nm1; int i,j,k, s, p; long idx, iidx, slsz, sz; VoxelPosition* Bound; int numBound = 0; bool flagSurf, flagBound; double r, t; int v1, v2, v3; int startIndex, tmpStartIndex, endIndex, tmpEndIndex, zStartIndex, zEndIndex, yStartIndex, yEndIndex; // // check volume padding - fast version // if(!CheckVolumePadding(f, L, M, N)) { printf("** Error - Object touches bounding box. Abort.\n"); exit(1); } #ifdef _DEBUG printf("\t************ Potential Field calculation parameters: ******************\n"); #ifdef HALF_BOUNDARY_POINTS printf("\t** Using only HALF of the boundary points.\n"); #else printf("\t** Using ALL boundary points.\n"); #endif #ifdef EUCLIDEAN_METRIC printf("\t** Using EUCLIDEAN metric.\n"); #else printf("\t** Using NON EUCLIDEAN metric.\n"); #endif if(inOut) { printf("\t** Inside and Outside.\n"); } else { printf("\t** Inside ONLY.\n"); } printf("\t********* Potential Field calculation parameters - end ****************\n"); #endif if((Bound = new VoxelPosition[BOUND_SIZE]) == NULL) { printf("\nERROR allocating memory for boundary array! 
- Abort\n"); exit(1); } Lm1 = L - 1; Mm1 = M - 1; Nm1 = N - 1; slsz = L*M; // slice size sz = slsz*N; // save all the boundary voxels in array Bound[] for (k = 1; k < Nm1; k++) { for (j = 1; j < Mm1; j++) { for (i = 1; i < Lm1; i++) { flagSurf = false; flagBound = true; idx = k*slsz + j*L + i; // CASE 1: treat the inner layer if (f[idx] == 0) continue; //consider six face neighbors, if anyone is zero, it is a boundary voxel iidx = k*slsz + j*L + i-1; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select this one as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + j*L + i+1; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + (j-1)*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = k*slsz + (j+1)*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = (k-1)*slsz + j*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif if(!flagSurf || flagBound) { iidx = (k+1)*slsz + j*L + i; if (f[iidx] == 0) { flagSurf = true; } #ifdef HALF_BOUNDARY_POINTS // consider only half of the boundary points else { if (f[iidx] == BOUNDARY) { // a neighbour of the point was already selected so we will not select it as part of the boundary. flagBound = false; } } #endif } } } } } // restore idx to the right value idx = k*slsz + j*L + i; if (flagSurf) { f[idx] = SURF; if(flagBound) { // if no neighbour of this voxel is already marked as boundary, then mark this one. // or if we are taking all the boundary voxels // (in this case flagBound stays true) f[idx] = BOUNDARY; Bound[numBound].x = i; Bound[numBound].y = j; Bound[numBound].z = k; numBound++; if(numBound >= BOUND_SIZE) { printf("ERROR: too many boundary points detected !! - Abort.\n"); exit(1); } } } } } } //printf("numBound = %d \n", numBound); #ifdef _DEBUG PrintElapsedTime("\tPF-1: finding the boundary voxels."); printf("\t--Found %d boundary voxels.\n", numBound); #endif /* // output boundary voxels FILE *ff; unsigned char a; long int b; ff=fopen("bound.vol", "w"); for(idx=0; idx < L*M*N; idx++) { if(f[idx] == BOUNDARY) { a = 255; } else { a = 0; } b = random(); if(b > RAND_MAX/2) { a = 0; } fwrite(&a, sizeof(unsigned char), 1, ff); b = 0; } fclose(ff); exit(1); */ // sort the boundary array. 
SortBoundaryArray(numBound, Bound); #ifdef _DEBUG PrintElapsedTime("\tPF-2: sorting the boundary voxels."); #ifdef TRACE // print the boundary voxels for(i=0; i < numBound; i++) { printf("%d %d %d 0.5\n", Bound[i].x, Bound[i].y, Bound[i].z); } exit(1); #endif #endif // Compute the potential field printf("Computing potential field.\n"); dim3 dimBlock(L,1); dim3 dimGrid(N,M); VoxelPosition *d_bound; unsigned char* d_f; Vector* d_force; cudaMalloc((void **)&d_f,sizeof(unsigned char)*L*M*N); cudaMalloc((void **)&d_bound,sizeof(VoxelPosition)*BOUND_SIZE); cudaMalloc((void **)&d_force,sizeof(Vector)*L*M*N); cudaMemcpy(d_f,f,sizeof(unsigned char)*L*M*N,cudaMemcpyHostToDevice); cudaMemcpy(d_bound,Bound,sizeof(VoxelPosition)*BOUND_SIZE,cudaMemcpyHostToDevice); cudaMemcpy(d_force,force,sizeof(Vector)*L*M*N,cudaMemcpyHostToDevice); compute_potential_field<<<dimGrid,dimBlock>>>(d_bound,d_force,numBound,d_f,inOut,slsz,sz,L, fieldStrenght); cudaMemcpy(force,d_force,sizeof(Vector)*L*M*N,cudaMemcpyDeviceToHost); // idx = -1; // for (k = 0; k < N; k++) { // printf("\tProcessing plane %d out of %d\r", k, N-1); // fflush(stdout); // // find the boundary voxels that will influence this point // // look at the Z coordinate // zStartIndex = 0; // zEndIndex = numBound- 1; // for (s = 0; s < numBound; s++) { // if((k - Bound[s].z) <= PF_THRESHOLD) { // zStartIndex = s; // break; // } // } // for (s = numBound-1; s >= zStartIndex; s--) { // if((Bound[s].z - k) <= PF_THRESHOLD) { // zEndIndex = s; // break; // } // } // // printf("ZStart: %d\t ZEnd: %d\n", zStartIndex, zEndIndex); // for (j = 0; j < M; j++) { // // find the boundary voxels that will influence this point // // look at the Y coordinate // yStartIndex = zStartIndex; // yEndIndex = zEndIndex; // for (s = zStartIndex; s <= zEndIndex; s++) { // if((j - Bound[s].y) <= PF_THRESHOLD) { // yStartIndex = s; // break; // } // } // for (s = zEndIndex; s >= yStartIndex; s--) { // if((Bound[s].y - j) <= PF_THRESHOLD) { // yEndIndex = s; // break; // } // } // // printf("YStart: %d\t YEnd: %d\n", yStartIndex, yEndIndex); // for (i = 0; i < L; i++) { // // printf("Point: %d\t%d\t%d:\n", i, j, k); // // idx = k*slsz + j*L + i; // idx = idx + 1; // force[idx].xd = 0.00; // force[idx].yd = 0.00; // force[idx].zd = 0.00; // if(!inOut) { // if(f[idx] == 0) { // // outside voxels have null force // continue; // } // } // else { // // we don't know where the inside of the object is // // so we compute the vector field everywhere. // // NOTHING // } // // surface voxels (including those selected for the // // field calculation) // // are ignored for now. 
The force there will be // // the average of their neighbors // // if we are to compute the force at boundary // // voxels too, the force will point // // towards the exterior of the object // // (example: a 30x30x100 box) // if(f[idx] == SURF) continue; // if(f[idx] == BOUNDARY) continue; // // find the boundary voxels that will influence this point // // look at the X coordinate // startIndex = yStartIndex; // endIndex = yEndIndex; // for (s = yStartIndex; s <= yEndIndex; s++) { // if((i - Bound[s].x) <= PF_THRESHOLD) { // startIndex = s; // break; // } // } // for (s = yEndIndex; s >= startIndex; s--) { // if((Bound[s].x - i) <= PF_THRESHOLD) { // endIndex = s; // break; // } // } // // printf("Start at: %d, end at: %d\n", startIndex, endIndex); // // exit(-1); // if(endIndex < startIndex) { // // no boundary point is close enough to this point - take all the boundary points // startIndex = 0; // endIndex = numBound - 1; // } // for (s = startIndex; s <= endIndex; s++) { // // printf("%d %d %d\n", i - Bound[s].x, j - Bound[s].y, k - Bound[s].z); // /* // // visibility test - too slow // if(GetIndexOfBPInXYZRange(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, // Bound, numBound, &v1, &v2)) // { // // check if this boundary pont is visible from the current position // if (IsLineCrossingBoundary(i, j, k, Bound[s].x, Bound[s].y, Bound[s].z, L, M, N, f)) { // // not visible // continue; // } // } // */ // v1 = i - Bound[s].x; // v2 = j - Bound[s].y; // v3 = k - Bound[s].z; // #ifdef EUCLIDEAN_METRIC // // euclidean metric // r = sqrt(v1*v1 + v2*v2 + v3*v3); // #else // // simpler metric // r = abs(v1) + abs(v2) + abs(v3); // #endif // // r CAN BE 0 if we are computing the force // // at boundary voxels too // // if the current point is a BOUNDARY point, // // some r will be 0, and that should be // // ignored // if(r != 0.00) { // // raise r to the fieldStrenght+1 power // // so that the force is // // 1/(dist^fieldStrength) // t = 1.00; // for(p = 0; p <= fieldStrenght; p++) { // t = t * r; // } // r = t; // force[idx].xd = force[idx].xd + (v1 / r); // force[idx].yd = force[idx].yd + (v2 / r); // force[idx].zd = force[idx].zd + (v3 / r); // } // } // /* // printf("First point with force vector != 0\n"); // printf("%f\t%f\t%f: %d, %d, %d\n", force[idx].xd, force[idx].yd, force[idx].zd, i, j, k); // exit(1); // */ // } // } // } // delete the Bound array - don't need it anymore delete [] Bound; #ifdef _DEBUG PrintElapsedTime("\tPF-3: computing potential field for inside voxels."); #endif // normalize force vectors: for(idx=0; idx < L*M*N; idx++) { if(!inOut) { // only for interior voxels we had calculated forces if(f[idx] == EXTERIOR) continue; } r = force[idx].xd*force[idx].xd + force[idx].yd*force[idx].yd + force[idx].zd*force[idx].zd; if(r > 0.00) { r = sqrt(r); force[idx].xd = force[idx].xd / r; force[idx].yd = force[idx].yd / r; force[idx].zd = force[idx].zd / r; } } #ifdef _DEBUG PrintElapsedTime("\tPF-4: normalizing force vectors for inside voxels."); #endif // if we know the inside from the outside // calculate the force at the surface voxels as the average of the // interior neighbors if (!inOut) { //neighbors: int ng[26]; // face neighbors ng[0] = + slsz + 0 + 0; ng[1] = - slsz + 0 + 0; ng[2] = + 0 + L + 0; ng[3] = + 0 - L + 0; ng[4] = + 0 + 0 + 1; ng[5] = + 0 + 0 - 1; // v-neighbors ng[6] = - slsz - L - 1; ng[7] = - slsz - L + 1; ng[8] = - slsz + L - 1; ng[9] = - slsz + L + 1; ng[10] = + slsz - L - 1; ng[11] = + slsz - L + 1; ng[12] = + slsz + L - 1; ng[13] = + slsz + L + 1; // 
e-neighbors ng[14] = + slsz + L + 0; ng[15] = + slsz - L + 0; ng[16] = - slsz + L + 0; ng[17] = - slsz - L + 0; ng[18] = + slsz + 0 + 1; ng[19] = + slsz + 0 - 1; ng[20] = - slsz + 0 + 1; ng[21] = - slsz + 0 - 1; ng[22] = + 0 + L + 1; ng[23] = + 0 + L - 1; ng[24] = + 0 - L + 1; ng[25] = + 0 - L - 1; for (k = 1; k < Nm1; k++) { for (j = 1; j < Mm1; j++) { for (i = 1; i < Lm1; i++) { idx = k*slsz + j*L + i; if((f[idx] == SURF) || (f[idx] == BOUNDARY)) { force[idx].xd = 0.00; force[idx].yd = 0.00; force[idx].zd = 0.00; // look at the neighbors and average the forces if not 0 // v1 = 0; for(s=0; s < 26; s++) { iidx = idx + ng[s]; // index of neighbor // take only neighbors that are not SURF or BOUNDARY // because those neighbors have force = 0 if(f[iidx] == SURF) continue; if(f[iidx] == BOUNDARY) continue; // if we know the interior of the object, take only interior // neighbors if(!inOut) { if(f[iidx] == EXTERIOR) continue; } force[idx].xd = force[idx].xd + force[iidx].xd; force[idx].yd = force[idx].yd + force[iidx].yd; force[idx].zd = force[idx].zd + force[iidx].zd; v1 = v1 + 1; } // average if(v1 != 0) { force[idx].xd = force[idx].xd / (double) v1; force[idx].yd = force[idx].yd / (double) v1; force[idx].zd = force[idx].zd / (double) v1; } else { printf("Boundary voxel has no interior neighbor !!! - Force = 0\n"); } // normalize r = force[idx].xd*force[idx].xd + force[idx].yd*force[idx].yd + force[idx].zd*force[idx].zd; if(r > 0.00) { r = sqrt(r); force[idx].xd = force[idx].xd / r; force[idx].yd = force[idx].yd / r; force[idx].zd = force[idx].zd / r; } } } } } } else { // we don't know the inside from the outside. // boundary points remain 0 // nothing to do } #ifdef _DEBUG PrintElapsedTime("\tPF-5: computing potential field for boundary voxels."); #endif return true; } // Sort the boundary array so that we can speed up the potential field calculation: ZYX in that order // selection sort /*bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) { int st, i; short zvst, yvst; // sort by Z SortByZ(0, numBound-1, Bound); // then by Y st = 0; zvst = Bound[st].z; for(i=0; i < numBound; i++) { if(Bound[i].z != zvst) { SortByY(st, i-1, Bound); st = i; zvst = Bound[st].z; } } SortByY(st, numBound-1, Bound); // then by X st = 0; zvst = Bound[st].z; yvst = Bound[st].y; for(i=0; i < numBound; i++) { if((Bound[i].y != yvst) || (Bound[i].z != zvst)) { SortByX(st, i-1, Bound); st = i; zvst = Bound[st].z; yvst = Bound[st].y; } } SortByX(st, numBound-1, Bound); return true; }*/ compareStruct comp; bool SortBoundaryArray(int numBound, VoxelPosition Bound[]) { thrust::sort(Bound, Bound+numBound, comp); return true; } bool SortByX(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].x; for(j=i+1; j <= endAt; j++) { if(Bound[j].x < crtMin) { minIndex = j; crtMin = Bound[j].x; } } if(minIndex != -1) { // swap values. tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } bool SortByY(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].y; for(j=i+1; j <= endAt; j++) { if(Bound[j].y < crtMin) { minIndex = j; crtMin = Bound[j].y; } } if(minIndex != -1) { // swap values. 
tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } bool SortByZ(int startAt, int endAt, VoxelPosition Bound[]) { int i, j, minIndex, crtMin; short tmp; for(i=startAt; i <= endAt; i++) { minIndex = -1; crtMin = Bound[i].z; for(j=i+1; j <= endAt; j++) { if(Bound[j].z < crtMin) { minIndex = j; crtMin = Bound[j].z; } } if(minIndex != -1) { // swap values. tmp = Bound[i].x; Bound[i].x = Bound[minIndex].x; Bound[minIndex].x = tmp; tmp = Bound[i].y; Bound[i].y = Bound[minIndex].y; Bound[minIndex].y = tmp; tmp = Bound[i].z; Bound[i].z = Bound[minIndex].z; Bound[minIndex].z = tmp; } } return true; } // returns the start and endindex of boundary point found in a region // in space bound by a box defined by the 2 points. // it doesn't change startIndex or endIndex if it returns false; // returns true if it finds any boundary point in that region, or false otherwise. bool GetIndexOfBPInXYZRange( short sx, short sy, short sz, short ex, short ey, short ez, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex) { int si1, ei1, si2, ei2; // temporary start and end indexes // if(GetIndexOfBPInZRange(sz, ez, Bound, numBound, &si1, &ei1)) { if(GetIndexOfBPInYRange(sy, ey, Bound, numBound, si1, ei1, &si2, &ei2)) { if(GetIndexOfBPInXRange(sx, ex, Bound, numBound, si2, ei2, &si1, &ei1)) { (*startIndex) = si1; (*endIndex) = ei1; return true; } } } return false; } bool GetIndexOfBPInZRange( short z1, short z2, VoxelPosition* Bound, int numBound, int* startIndex, int* endIndex) { short minz, maxz; int s; int si; // sort the 2 z values; if(z1 < z2) { minz = z1; maxz = z2; } else { minz = z2; maxz = z1; } si = -1; for (s = 0; s < numBound; s++) { if((minz - Bound[s].z) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = numBound-1; s >= (*startIndex); s--) { if((Bound[s].z - maxz) < 0) { (*endIndex) = s; break; } } return true; } bool GetIndexOfBPInYRange( short y1, short y2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex) { short miny, maxy; int s; int si; // sort the 2 y values; if(y1 < y2) { miny = y1; maxy = y2; } else { miny = y2; maxy = y1; } // start the search at startAt and end it endAt si = -1; for (s = startAt; s <= endAt; s++) { if((miny - Bound[s].y) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = endAt; s >= (*startIndex); s--) { if((Bound[s].y - maxy) < 0) { (*endIndex) = s; break; } } return true; } bool GetIndexOfBPInXRange( short x1, short x2, VoxelPosition* Bound, int numBound, int startAt, int endAt, int* startIndex, int* endIndex) { short minx, maxx; int s; int si; // sort the 2 x values; if(x1 < x2) { minx = x1; maxx = x2; } else { minx = x2; maxx = x1; } // start the search at startAt and end it endAt si = -1; for (s = startAt; s <= endAt; s++) { if((minx - Bound[s].x) < 0) { si = s; break; } } if(si == -1) { // couldn't find any boundary voxel return false; } (*startIndex) = si; for (s = endAt; s >= (*startIndex); s--) { if((Bound[s].x - maxx) < 0) { (*endIndex) = s; break; } } return true; }
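SortBoundaryArray in both versions of this file replaces the commented-out selection sorts with a single thrust::sort call using compareStruct, which orders boundary voxels by Z, then Y, then X. A small host-side illustration, assuming only that VoxelPosition (from potVect.h) exposes the x, y, z fields the code above already uses:

#include <thrust/sort.h>

// Illustrative only: three voxels sorted with the same comparator used by SortBoundaryArray.
void sortThreeVoxels() {
  VoxelPosition b[3];
  b[0].x = 5; b[0].y = 2; b[0].z = 7;
  b[1].x = 1; b[1].y = 9; b[1].z = 0;
  b[2].x = 3; b[2].y = 2; b[2].z = 7;
  thrust::sort(b, b + 3, compareStruct());
  // Result: (1,9,0) first (smallest z), then (3,2,7) before (5,2,7) because z and y tie and 3 < 5.
}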
48539e323b64ff26a71ca7d84c2111c5b46b33d1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/scale_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void ScaleForward(const int n, const Dtype* in, const Dtype* scale, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index]; } } template <typename Dtype> __global__ void ScaleBiasForward(const int n, const Dtype* in, const Dtype* scale, const Dtype* bias, const int scale_dim, const int inner_dim, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { const int scale_index = (index / inner_dim) % scale_dim; out[index] = in[index] * scale[scale_index] + bias[scale_index]; } } template <typename Dtype> void ScaleLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int count = top[0]->count(); const Dtype* bottom_data = bottom[0]->gpu_data(); if (bottom[0] == top[0]) { // in-place computation; need to store bottom data before overwriting it. // Note that this is only necessary for Backward; we could skip this if not // doing Backward, but Caffe currently provides no way of knowing whether // we'll need to do Backward at the time of the Forward call. caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(), temp_.mutable_gpu_data()); } const Dtype* scale_data = ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); if (bias_layer_) { const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL((ScaleBiasForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_, top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL((ScaleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data); } } template <typename Dtype> void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (bias_layer_ && this->param_propagate_down_[this->param_propagate_down_.size() - 1]) { bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_); } const bool scale_param = (bottom.size() == 1); Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1]; if ((!scale_param && propagate_down[1]) || (scale_param && this->param_propagate_down_[0])) { const Dtype* top_diff = top[0]->gpu_diff(); const bool in_place = (bottom[0] == top[0]); const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data(); // Hack: store big eltwise product in bottom[0] diff, except in the special // case where this layer itself does the eltwise product, in which case we // can store it directly in the scale diff, and we're done. // If we're computing in-place (and not doing eltwise computation), this // hack doesn't work and we store the product in temp_. const bool is_eltwise = (bottom[0]->count() == scale->count()); Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() : (in_place ?
temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff())); caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product); if (!is_eltwise) { Dtype* sum_result = NULL; if (inner_dim_ == 1) { sum_result = product; } else if (sum_result_.count() == 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(inner_dim_, product, sum_mult, &result); *scale_diff += result; } else { caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff); } } else { const Dtype* sum_mult = sum_multiplier_.gpu_data(); sum_result = (outer_dim_ == 1) ? scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data(); caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_, Dtype(1), product, sum_mult, Dtype(0), sum_result); } if (outer_dim_ != 1) { const Dtype* sum_mult = sum_multiplier_.gpu_data(); if (scale_dim_ == 1) { Dtype* scale_diff = scale->mutable_cpu_diff(); if (scale_param) { Dtype result; caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result); *scale_diff += result; } else { caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff); } } else { Dtype* scale_diff = scale->mutable_gpu_diff(); caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_, Dtype(1), sum_result, sum_mult, Dtype(scale_param), scale_diff); } } } } if (propagate_down[0]) { const int count = top[0]->count(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* scale_data = scale->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL((ScaleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer); } // namespace caffe
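The kernel launches in this .hip file follow the hipLaunchKernelGGL form that hipify substitutes for CUDA's <<<grid, block>>> syntax. A self-contained toy example of the same transformation; the kernel and wrapper names are illustrative, not from the original source.

#include <hip/hip_runtime.h>

__global__ void scaleBy2(const int n, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] *= 2.0f;
}

void launchScaleBy2(float* d_y, int n) {
  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  // CUDA form:  scaleBy2<<<blocks, threads>>>(n, d_y);
  // Equivalent HIP form (grid, block, shared-memory bytes, stream, kernel arguments):
  hipLaunchKernelGGL(scaleBy2, dim3(blocks), dim3(threads), 0, 0, n, d_y);
}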
48539e323b64ff26a71ca7d84c2111c5b46b33d1.cu
#include <cfloat>
#include <vector>

#include "caffe/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
    const Dtype* scale, const int scale_dim, const int inner_dim,
    Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index];
  }
}

template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
    const Dtype* scale, const Dtype* bias, const int scale_dim,
    const int inner_dim, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int scale_index = (index / inner_dim) % scale_dim;
    out[index] = in[index] * scale[scale_index] + bias[scale_index];
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  const int count = top[0]->count();
  const Dtype* bottom_data = bottom[0]->gpu_data();
  if (bottom[0] == top[0]) {
    // in-place computation; need to store bottom data before overwriting it.
    // Note that this is only necessary for Backward; we could skip this if not
    // doing Backward, but Caffe currently provides no way of knowing whether
    // we'll need to do Backward at the time of the Forward call.
    caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
               temp_.mutable_gpu_data());
  }
  const Dtype* scale_data =
      ((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  if (bias_layer_) {
    const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    ScaleBiasForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
  } else {
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
  }
}

template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (bias_layer_ &&
      this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
    bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
  }
  const bool scale_param = (bottom.size() == 1);
  Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
  if ((!scale_param && propagate_down[1]) ||
      (scale_param && this->param_propagate_down_[0])) {
    const Dtype* top_diff = top[0]->gpu_diff();
    const bool in_place = (bottom[0] == top[0]);
    const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
    // Hack: store big eltwise product in bottom[0] diff, except in the special
    // case where this layer itself does the eltwise product, in which case we
    // can store it directly in the scale diff, and we're done.
    // If we're computing in-place (and not doing eltwise computation), this
    // hack doesn't work and we store the product in temp_.
    const bool is_eltwise = (bottom[0]->count() == scale->count());
    Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() : (in_place ?
        temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
    caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
    if (!is_eltwise) {
      Dtype* sum_result = NULL;
      if (inner_dim_ == 1) {
        sum_result = product;
      } else if (sum_result_.count() == 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        Dtype* scale_diff = scale->mutable_cpu_diff();
        if (scale_param) {
          Dtype result;
          caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
          *scale_diff += result;
        } else {
          caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
        }
      } else {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        sum_result = (outer_dim_ == 1) ?
            scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
        caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
                       Dtype(1), product, sum_mult, Dtype(0), sum_result);
      }
      if (outer_dim_ != 1) {
        const Dtype* sum_mult = sum_multiplier_.gpu_data();
        if (scale_dim_ == 1) {
          Dtype* scale_diff = scale->mutable_cpu_diff();
          if (scale_param) {
            Dtype result;
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
            *scale_diff += result;
          } else {
            caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
          }
        } else {
          Dtype* scale_diff = scale->mutable_gpu_diff();
          caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
                         Dtype(1), sum_result, sum_mult, Dtype(scale_param),
                         scale_diff);
        }
      }
    }
  }
  if (propagate_down[0]) {
    const int count = top[0]->count();
    const Dtype* top_diff = top[0]->gpu_diff();
    const Dtype* scale_data = scale->gpu_data();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    ScaleForward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
        <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);

}  // namespace caffe
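A minimal, self-contained sketch (illustrative only, not part of the Caffe sources) of the indexing convention ScaleForward above relies on: for an N x C x H x W blob scaled per channel, outer_dim_ is N, scale_dim_ is C and inner_dim_ is H*W, so element index is multiplied by scale[(index / inner_dim) % scale_dim]. The kernel is restated with a plain bounds check in place of Caffe's CUDA_KERNEL_LOOP macro, and the kernel name, sizes and launch configuration are assumptions chosen purely for the demonstration.

#include <cstdio>
#include <cuda_runtime.h>

// Same per-channel indexing as Caffe's ScaleForward, without the Caffe macros.
__global__ void scale_forward_demo(const int n, const float* in, const float* scale,
    const int scale_dim, const int inner_dim, float* out) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < n) {
    const int scale_index = (index / inner_dim) % scale_dim;  // channel of element `index`
    out[index] = in[index] * scale[scale_index];
  }
}

int main() {
  // A 2x3x2x2 "blob": outer_dim = 2, scale_dim = 3 (channels), inner_dim = 4.
  const int outer_dim = 2, scale_dim = 3, inner_dim = 4;
  const int n = outer_dim * scale_dim * inner_dim;
  float h_in[n], h_out[n], h_scale[scale_dim] = {1.f, 2.f, 3.f};
  for (int i = 0; i < n; ++i) h_in[i] = 1.f;

  float *d_in, *d_out, *d_scale;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMalloc(&d_scale, scale_dim * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_scale, h_scale, scale_dim * sizeof(float), cudaMemcpyHostToDevice);

  scale_forward_demo<<<(n + 255) / 256, 256>>>(n, d_in, d_scale, scale_dim, inner_dim, d_out);
  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

  // Each run of inner_dim elements shares one channel's scale:
  // expect 1 1 1 1  2 2 2 2  3 3 3 3, then the same pattern for the second image.
  for (int i = 0; i < n; ++i) printf("%g ", h_out[i]);
  printf("\n");
  cudaFree(d_in); cudaFree(d_out); cudaFree(d_scale);
  return 0;
}

Compiled with nvcc alone, this prints the per-channel pattern twice, one group of inner_dim values per channel and the whole pattern repeated for each outer_dim slice, which is exactly how the layer broadcasts a C-element scale over an N x C x H x W blob.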
069b0e253a1d0ca11c0140b3823e4480daa36bda.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "graph.h"
#include "sssp.h"
#include "timer.h"

//#define INF 1000000000
__constant__ unsigned INF = 1000000000;

__global__ void initialize(Graph G, unsigned* d_dist, uint64_t nnodes) {
  uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  if(gid < nnodes) {
    d_dist[gid] = INF;
    //printf("%ld\n",INF); // works fine
    // printf("%d",d_dist[gid]);
  }
  //printf("%d",G.d_offset[gid]);
}

__device__ bool processedge(Graph& G, uint64_t nnodes, unsigned* d_dist,
                            uint64_t worknode, unsigned i, uint64_t& dst) {
  dst = G.getDest(worknode, i);  // get the i-th neighbor of worknode
  if(dst >= nnodes) return false;

  unsigned wt = G.getWt(worknode, i);  // get edge-weight of the i-th edge
  if(wt >= INF) return false;

  unsigned altdist = d_dist[worknode] + wt;
  if(altdist < d_dist[dst]) {  // a possible site for thread divergence
    unsigned olddist = atomicMin(&d_dist[dst], altdist);
    if(altdist < olddist)
      return true;  // dist is updated to a lower value (another possible site for thread divergence)
  }
  return false;
}

__device__ bool processnode(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode) {
  if(worknode >= nnodes) return false;

  bool changed = false;  // thread-local
  unsigned outDegree = G.getDegree(worknode);
  for(unsigned i = 0; i < outDegree; ++i) {
    uint64_t dst = nnodes;
    unsigned olddist = processedge(G, nnodes, d_dist, worknode, i, dst);
    if(olddist) changed = true;
  }
  return changed;
}

__global__ void ssspCompute(Graph G, uint64_t nnodes, uint64_t nedges, unsigned* d_dist, bool* d_changed) {
  uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint64_t src = gid;  // node under consideration
  if(processnode(G, nnodes, d_dist, src))
    *d_changed = true;
}

void sssp_parallel(Graph& G, unsigned* h_dist, unsigned* d_dist, uint64_t _src,
                   unsigned num_blocks, unsigned block_size) {
  hipProfilerStart();  // start of profiling region
  hipLaunchKernelGGL((initialize), dim3(num_blocks), dim3(block_size), 0, 0, G, d_dist, G.h_nnodes);
  hipProfilerStop();  // end of profiling region
  hipDeviceSynchronize();
  gpuErrchk(hipPeekAtLastError());

  bool h_changed, *d_changed;
  gpuErrchk(hipMalloc(&d_changed, sizeof(bool)));

  unsigned zero = 0;    // the distance zero from source
  uint64_t src = _src;  // setting the source vertex by specifying the node-id.
  gpuErrchk(hipMemcpy(&d_dist[src], &zero, sizeof(zero), hipMemcpyHostToDevice));

  do {
    h_changed = false;
    gpuErrchk(hipMemcpy(d_changed, &h_changed, sizeof(h_changed), hipMemcpyHostToDevice));
    hipLaunchKernelGGL((ssspCompute), dim3(num_blocks), dim3(block_size), 0, 0,
                       G, G.h_nnodes, G.h_nedges, d_dist, d_changed);
    hipDeviceSynchronize();
    gpuErrchk(hipPeekAtLastError());
    gpuErrchk(hipMemcpy(&h_changed, d_changed, sizeof(h_changed), hipMemcpyDeviceToHost));
  } while(h_changed);

  gpuErrchk(hipMemcpy(h_dist, d_dist, G.h_nnodes * sizeof(unsigned), hipMemcpyDeviceToHost));
}
069b0e253a1d0ca11c0140b3823e4480daa36bda.cu
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_profiler_api.h>  // declares cudaProfilerStart / cudaProfilerStop used below
#include "graph.h"
#include "sssp.h"
#include "timer.h"

//#define INF 1000000000
__constant__ unsigned INF = 1000000000;

__global__ void initialize(Graph G, unsigned* d_dist, uint64_t nnodes) {
  uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  if(gid < nnodes) {
    d_dist[gid] = INF;
    //printf("%ld\n",INF); // works fine
    // printf("%d",d_dist[gid]);
  }
  //printf("%d",G.d_offset[gid]);
}

__device__ bool processedge(Graph& G, uint64_t nnodes, unsigned* d_dist,
                            uint64_t worknode, unsigned i, uint64_t& dst) {
  dst = G.getDest(worknode, i);  // get the i-th neighbor of worknode
  if(dst >= nnodes) return false;

  unsigned wt = G.getWt(worknode, i);  // get edge-weight of the i-th edge
  if(wt >= INF) return false;

  unsigned altdist = d_dist[worknode] + wt;
  if(altdist < d_dist[dst]) {  // a possible site for thread divergence
    unsigned olddist = atomicMin(&d_dist[dst], altdist);
    if(altdist < olddist)
      return true;  // dist is updated to a lower value (another possible site for thread divergence)
  }
  return false;
}

__device__ bool processnode(Graph& G, uint64_t nnodes, unsigned* d_dist, uint64_t worknode) {
  if(worknode >= nnodes) return false;

  bool changed = false;  // thread-local
  unsigned outDegree = G.getDegree(worknode);
  for(unsigned i = 0; i < outDegree; ++i) {
    uint64_t dst = nnodes;
    unsigned olddist = processedge(G, nnodes, d_dist, worknode, i, dst);
    if(olddist) changed = true;
  }
  return changed;
}

__global__ void ssspCompute(Graph G, uint64_t nnodes, uint64_t nedges, unsigned* d_dist, bool* d_changed) {
  uint64_t gid = threadIdx.x + blockDim.x * blockIdx.x;
  uint64_t src = gid;  // node under consideration
  if(processnode(G, nnodes, d_dist, src))
    *d_changed = true;
}

void sssp_parallel(Graph& G, unsigned* h_dist, unsigned* d_dist, uint64_t _src,
                   unsigned num_blocks, unsigned block_size) {
  cudaProfilerStart();  // start of profiling region
  initialize<<<num_blocks, block_size>>>(G, d_dist, G.h_nnodes);
  cudaProfilerStop();  // end of profiling region
  cudaDeviceSynchronize();
  gpuErrchk(cudaPeekAtLastError());

  bool h_changed, *d_changed;
  gpuErrchk(cudaMalloc(&d_changed, sizeof(bool)));

  unsigned zero = 0;    // the distance zero from source
  uint64_t src = _src;  // setting the source vertex by specifying the node-id.
  gpuErrchk(cudaMemcpy(&d_dist[src], &zero, sizeof(zero), cudaMemcpyHostToDevice));

  do {
    h_changed = false;
    gpuErrchk(cudaMemcpy(d_changed, &h_changed, sizeof(h_changed), cudaMemcpyHostToDevice));
    ssspCompute<<<num_blocks, block_size>>>(G, G.h_nnodes, G.h_nedges, d_dist, d_changed);
    cudaDeviceSynchronize();
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(&h_changed, d_changed, sizeof(h_changed), cudaMemcpyDeviceToHost));
  } while(h_changed);

  gpuErrchk(cudaMemcpy(h_dist, d_dist, G.h_nnodes * sizeof(unsigned), cudaMemcpyDeviceToHost));
}
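For context on the relaxation scheme used by processedge and ssspCompute above, here is a small self-contained toy driver (illustrative only; it deliberately avoids the Graph, gpuErrchk and timer helpers from graph.h, sssp.h and timer.h, whose exact interfaces are not shown here) that runs the same atomicMin-based edge relaxation and the same host-side h_changed fixed-point loop on a hard-coded 4-node CSR graph. The kernel and variable names in this sketch are invented for the example.

#include <cstdio>
#include <cuda_runtime.h>

const unsigned kInf = 1000000000u;

// One thread per node: relax every outgoing edge, as processedge does,
// and raise *changed whenever a distance shrinks.
__global__ void relaxAll(const unsigned* offset, const unsigned* dest, const unsigned* wt,
    unsigned* dist, unsigned nnodes, bool* changed) {
  unsigned u = blockIdx.x * blockDim.x + threadIdx.x;
  if (u >= nnodes || dist[u] >= kInf) return;
  for (unsigned e = offset[u]; e < offset[u + 1]; ++e) {
    unsigned alt = dist[u] + wt[e];
    if (alt < dist[dest[e]] && atomicMin(&dist[dest[e]], alt) > alt)
      *changed = true;
  }
}

int main() {
  // CSR graph with 4 nodes: 0->1 (w=1), 0->2 (w=4), 1->2 (w=2), 2->3 (w=1).
  const unsigned nnodes = 4;
  unsigned h_offset[] = {0, 2, 3, 4, 4};
  unsigned h_dest[]   = {1, 2, 2, 3};
  unsigned h_wt[]     = {1, 4, 2, 1};
  unsigned h_dist[]   = {0, kInf, kInf, kInf};  // node 0 is the source

  unsigned *d_offset, *d_dest, *d_wt, *d_dist;
  bool *d_changed;
  cudaMalloc(&d_offset, sizeof(h_offset));
  cudaMalloc(&d_dest,   sizeof(h_dest));
  cudaMalloc(&d_wt,     sizeof(h_wt));
  cudaMalloc(&d_dist,   sizeof(h_dist));
  cudaMalloc(&d_changed, sizeof(bool));
  cudaMemcpy(d_offset, h_offset, sizeof(h_offset), cudaMemcpyHostToDevice);
  cudaMemcpy(d_dest,   h_dest,   sizeof(h_dest),   cudaMemcpyHostToDevice);
  cudaMemcpy(d_wt,     h_wt,     sizeof(h_wt),     cudaMemcpyHostToDevice);
  cudaMemcpy(d_dist,   h_dist,   sizeof(h_dist),   cudaMemcpyHostToDevice);

  // Same fixed-point loop as sssp_parallel: repeat until no distance changes.
  bool h_changed;
  do {
    h_changed = false;
    cudaMemcpy(d_changed, &h_changed, sizeof(bool), cudaMemcpyHostToDevice);
    relaxAll<<<1, 32>>>(d_offset, d_dest, d_wt, d_dist, nnodes, d_changed);
    cudaMemcpy(&h_changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost);
  } while (h_changed);

  cudaMemcpy(h_dist, d_dist, sizeof(h_dist), cudaMemcpyDeviceToHost);
  for (unsigned i = 0; i < nnodes; ++i)
    printf("dist[%u] = %u\n", i, h_dist[i]);  // expected: 0 1 3 4
  cudaFree(d_offset); cudaFree(d_dest); cudaFree(d_wt);
  cudaFree(d_dist); cudaFree(d_changed);
  return 0;
}

The loop terminates once a full pass of relaxAll lowers no distance, which is the same Bellman-Ford-style stopping condition sssp_parallel implements with d_changed.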