Dataset schema (column : type, string-length range):

  hip_filename    stringlengths   5 – 84
  hip_content     stringlengths   79 – 9.69M
  cuda_filename   stringlengths   4 – 83
  cuda_content    stringlengths   19 – 9.69M
95f40e81fb151c4f82384826ec89268e55121fd7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <THH/THHNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override, const bool count_include_pad, const bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { top_data[index] = scalar_t(0); continue; } accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_out_cuda_frame_nhwc(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override, const bool count_include_pad, const bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int pw = (index / channels) % pooled_width; const int ph = (index / channels / pooled_width) % pooled_height; const int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { top_data[index] = scalar_t(0); continue; } accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + n * 
channels * height * width + c; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[(h * width + w) * channels]; } } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override, bool count_include_pad, bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { continue; } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_backward_out_cuda_frame_nhwc(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override, bool count_include_pad, bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int w = (index / channels) % width; const int h = (index / channels / width) % height; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + n * channels * pooled_height * pooled_width + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { continue; } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast){ TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int32_t count = safe_downcast<int32_t, int64_t>(output.numel()); const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads); bool use_divisor = divisor_override.has_value(); const auto divisor_override_value = use_divisor ? divisor_override.value() : 0; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); switch (memory_format){ case MemoryFormat::ChannelsLast: { output.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast); hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame_nhwc<scalar_t, accscalar_t>) , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override_value, count_include_pad, use_divisor); break; } case MemoryFormat::Contiguous: { hipLaunchKernelGGL(( avg_pool2d_out_cuda_frame<scalar_t, accscalar_t>) , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override_value, count_include_pad, use_divisor); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); AT_CUDA_CHECK(hipGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int32_t count = safe_downcast<int32_t, int64_t>(input.numel()); const uint32_t num_threads = ::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads); bool use_divisor = divisor_override.has_value(); const auto divisor_override_value = use_divisor ? divisor_override.value() : 0; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { gradInput.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast); hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame_nhwc<scalar_t, accscalar_t>) , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override_value, count_include_pad, use_divisor); break; } case MemoryFormat::Contiguous: { hipLaunchKernelGGL(( avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t>) , dim3(num_blocks), dim3(num_threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override_value, count_include_pad, use_divisor); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); AT_CUDA_CHECK(hipGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
95f40e81fb151c4f82384826ec89268e55121fd7.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/native/Pool.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <THC/THCNumerics.cuh> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { __device__ inline int min(int a, int b) { return a <= b ? a : b; } __device__ inline int max(int a, int b) { return a >= b ? a : b; } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_out_cuda_frame(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override, const bool count_include_pad, const bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { top_data[index] = scalar_t(0); continue; } accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_out_cuda_frame_nhwc(const int nthreads, const scalar_t* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const top_data, const int divisor_override, const bool count_include_pad, const bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int pw = (index / channels) % pooled_width; const int ph = (index / channels / pooled_width) % pooled_height; const int n = index / channels / pooled_width / pooled_height; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { top_data[index] = scalar_t(0); continue; } accscalar_t aveval = accscalar_t(0); const scalar_t* const bottom_slice = bottom_data + n * channels * height * width + c; for (int h = hstart; h < hend; ++h) { for (int w = 
wstart; w < wend; ++w) { aveval += bottom_slice[(h * width + w) * channels]; } } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } top_data[index] = ScalarConvert<accscalar_t, scalar_t>::to(aveval / divide_factor); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_backward_out_cuda_frame(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override, bool count_include_pad, bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { continue; } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[ph * pooled_width + pw] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } template <typename scalar_t, typename accscalar_t> __global__ void avg_pool2d_backward_out_cuda_frame_nhwc(const int nthreads, const scalar_t* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, scalar_t* const bottom_diff, const int divisor_override, bool count_include_pad, bool use_divisor) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = index % channels; const int w = (index / channels) % width; const int h = (index / channels / width) % height; const int n = index / channels / width / height; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); accscalar_t gradient = accscalar_t(0); const scalar_t* const top_diff_slice = top_diff + n * channels * pooled_height * pooled_width + c; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); if (hstart >= hend || wstart >= wend) { continue; } int divide_factor; if (use_divisor) { divide_factor = divisor_override; } else { if(count_include_pad) { divide_factor = pool_size; } else { divide_factor = (hend - hstart) * (wend - wstart); } } gradient += top_diff_slice[(ph * pooled_width + pw) * channels] / divide_factor; } } bottom_diff[index] = ScalarConvert<accscalar_t, scalar_t>::to(gradient); } } void avg_pool2d_out_cuda_template( Tensor& output, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg output_arg{ output, "output", 1 }; TensorArg input_arg{ input_, "input_", 2 }; checkAllSameGPU("avg_pool2d_out_cuda", {output_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast){ TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const int64_t nbatch = input_.ndimension() == 4 ? 
input_.size(-4) : 1; const int64_t nInputPlane = input_.size(-3); const int64_t inputHeight = input_.size(-2); const int64_t inputWidth = input_.size(-1); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); pool2d_shape_check( input_, kH, kW, dH, dW, padH, padW, 1, 1, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); Tensor input = input_.contiguous(memory_format); output.resize_({nbatch, nInputPlane, outputHeight, outputWidth}); const int32_t count = safe_downcast<int32_t, int64_t>(output.numel()); const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads); bool use_divisor = divisor_override.has_value(); const auto divisor_override_value = use_divisor ? divisor_override.value() : 0; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "avg_pool2d_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *output_data = output.data_ptr<scalar_t>(); scalar_t *input_data = input.data_ptr<scalar_t>(); switch (memory_format){ case MemoryFormat::ChannelsLast: { output.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast); avg_pool2d_out_cuda_frame_nhwc<scalar_t, accscalar_t> <<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override_value, count_include_pad, use_divisor); break; } case MemoryFormat::Contiguous: { avg_pool2d_out_cuda_frame<scalar_t, accscalar_t> <<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, input_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, output_data, divisor_override_value, count_include_pad, use_divisor); break; } default: TORCH_CHECK(false, "Unsupported memory format. Supports only ChannelsLast, Contiguous"); } } ); AT_CUDA_CHECK(cudaGetLastError()); if (input.ndimension() == 3) { output.resize_({nInputPlane, outputHeight, outputWidth}); } } Tensor& avg_pool2d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { TensorArg gradInput_arg{ gradInput, "gradInput", 1 }; TensorArg gradOutput_arg{ gradOutput_, "gradOutput_", 2 }; TensorArg input_arg{ input_, "input_", 3 }; checkAllSameGPU("avg_pool2d_backward_out_cuda", {gradInput_arg, gradOutput_arg, input_arg}); // #20866, #22032: Guarantee this for the official C++ API? TORCH_CHECK(kernel_size.size() == 1 || kernel_size.size() == 2, "avg_pool2d: kernel_size must either be a single int, or a tuple of two ints"); const int kH = safe_downcast<int, int64_t>(kernel_size[0]); const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]); TORCH_CHECK(stride.empty() || stride.size() == 1 || stride.size() == 2, "avg_pool2d: stride must either be omitted, a single int, or a tuple of two ints"); const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]); const int dW = stride.empty() ? kW : stride.size() == 1 ? 
dH : safe_downcast<int, int64_t>(stride[1]); TORCH_CHECK(padding.size() == 1 || padding.size() == 2, "avg_pool2d: padding must either be a single int, or a tuple of two ints"); const int padH = safe_downcast<int, int64_t>(padding[0]); const int padW = padding.size() == 1 ? padH : safe_downcast<int, int64_t>(padding[1]); TORCH_CHECK(!divisor_override.has_value() || divisor_override.value() != 0, "divisor must be not zero"); const auto memory_format = input_.suggest_memory_format(); if (memory_format == at::MemoryFormat::ChannelsLast) { TORCH_CHECK(input_.ndimension() == 4, "non-empty 4D (batch mode) tensor expected for input with channels_last layout"); } else { TORCH_CHECK((input_.ndimension() == 3 || input_.ndimension() == 4), "non-empty 3D or 4D (batch mode) tensor expected for input"); } const Tensor input = input_.contiguous(memory_format); const Tensor gradOutput = gradOutput_.contiguous(memory_format); const int64_t nbatch = input.ndimension() == 4 ? input.size(-4) : 1; const int64_t nInputPlane = input.size(-3); const int64_t inputHeight = input.size(-2); const int64_t inputWidth = input.size(-1); const int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, 1, ceil_mode); const int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, 1, ceil_mode); avg_pool2d_backward_shape_check( input_, gradOutput_, nbatch, kH, kW, dH, dW, padH, padW, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth); gradInput.resize_as_(input); const int32_t count = safe_downcast<int32_t, int64_t>(input.numel()); const uint32_t num_threads = std::min(at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); const uint32_t num_blocks = cuda::ATenCeilDiv<uint32_t>(count, num_threads); bool use_divisor = divisor_override.has_value(); const auto divisor_override_value = use_divisor ? divisor_override.value() : 0; AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "avg_pool2d_backward_out_cuda_frame", [&] { using accscalar_t = acc_type<scalar_t, true>; scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>(); scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>(); switch (memory_format) { case MemoryFormat::ChannelsLast: { gradInput.unsafeGetTensorImpl()->empty_tensor_restride(MemoryFormat::ChannelsLast); avg_pool2d_backward_out_cuda_frame_nhwc<scalar_t, accscalar_t> <<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override_value, count_include_pad, use_divisor); break; } case MemoryFormat::Contiguous: { avg_pool2d_backward_out_cuda_frame<scalar_t, accscalar_t> <<<num_blocks, num_threads, 0, at::cuda::getCurrentCUDAStream()>>>( count, gradOutput_data, nbatch, nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, dH, dW, padH, padW, gradInput_data, divisor_override_value, count_include_pad, use_divisor); break; } default: TORCH_CHECK(false, "Unsupported memory format. 
Supports only ChannelsLast, Contiguous"); } } ); AT_CUDA_CHECK(cudaGetLastError()); return gradInput; } } // namespace Tensor& avg_pool2d_out_cuda( Tensor& output, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor avg_pool2d_cuda( const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { Tensor output = at::empty({0}, input.options()); avg_pool2d_out_cuda_template( output, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return output; } Tensor& avg_pool2d_backward_out_cuda( Tensor& gradInput, const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } Tensor avg_pool2d_backward_cuda( const Tensor& gradOutput_, const Tensor& input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) { auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT); avg_pool2d_backward_out_cuda_template( gradInput, gradOutput_, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override); return gradInput; } } // at::native } // at
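The avg_pool2d pair above shows the core rewrite hipify performs on kernel launches: CUDA's triple-chevron syntax becomes an explicit hipLaunchKernelGGL call that takes the grid, block, shared-memory, and stream arguments as ordinary parameters, and the ATen stream helper at::cuda::getCurrentCUDAStream() is renamed to at::hip::getCurrentHIPStreamMasqueradingAsCUDA(). The following is a minimal, self-contained sketch of that launch mapping only; the kernel and its sizes are hypothetical and exist purely for illustration.

#include <cuda_runtime.h>

// Toy kernel: scale each of n elements of x by a.
__global__ void scaleKernel(float* x, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    x[i] *= a;
  }
}

int main() {
  const int n = 1 << 20;
  float* d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));

  const int threads = 256;
  const int blocks = (n + threads - 1) / threads;
  cudaStream_t stream = 0;  // default stream

  // CUDA form, as found in the *.cu column of this dataset:
  scaleKernel<<<blocks, threads, 0, stream>>>(d_x, n, 2.0f);

  // After hipify (the *.hip column), the same launch is spelled:
  //   hipLaunchKernelGGL((scaleKernel), dim3(blocks), dim3(threads), 0, stream,
  //                      d_x, n, 2.0f);

  cudaDeviceSynchronize();
  cudaFree(d_x);
  return 0;
}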
bf6372822e2fd1deb74756b7c252686ca94e13d4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "cuda_globals.cuh" #include "gl.cuh" #include "const.cuh" #include "globals_hip.cuh" static char* field = NULL; static char* getFieldPtrAt(int x, int y) { if (x > width || y > height) { return NULL; } return field + (x + (y * width)); } static void unloadField() { if (d_outfield) { hipFree(d_outfield); d_outfield = NULL; } if (d_field) { hipFree(d_field); d_field = NULL; } if (field) { free(field); field = NULL; } } static int loadFile(const char* fileName) { unloadField(); int fileWidth, fileHeight; FILE* fd = fopen(fileName, "rb"); if (!fd) { return 1; } fscanf(fd, "%d %d", &fileWidth, &fileHeight); printf("Got file dimensions %d / %d\n", fileWidth, fileHeight); width = fileWidth + 2; height = fileHeight + 2; if (width % threadsPerBlock.x) { width += threadsPerBlock.x - (width % threadsPerBlock.x); } if (height % threadsPerBlock.y) { height += threadsPerBlock.y - (height % threadsPerBlock.y); } numBlocks.x = width / threadsPerBlock.x; numBlocks.y = height / threadsPerBlock.y; size_t memorySize = (size_t)(width * height); field = (char*)malloc(memorySize); memset(field, CELL_EMPTY, memorySize); int charIn; char charOut; int x = 0; int y = 0; do { charIn = fgetc(fd); if (charIn < 0) { break; } if (charIn == '\r' || charIn == '\n') { continue; } switch (charIn) { case FILE_CELL_CONDUCTOR: charOut = CELL_CONDUCTOR; break; case FILE_CELL_ELECTRON_HEAD: charOut = CELL_ELECTRON_HEAD; break; case FILE_CELL_ELECTRON_TAIL: charOut = CELL_ELECTRON_TAIL; break; default: charOut = CELL_EMPTY; break; } *getFieldPtrAt(x + 1, y + 1) = charOut; if (++x >= fileWidth) { y++; x = 0; } } while (!feof(fd)); printf("File loaded: %s\n", fileName); fclose(fd); hipMalloc(&d_field, memorySize); hipMalloc(&d_outfield, memorySize); hipMemcpy(d_field, field, memorySize, hipMemcpyHostToDevice); hipMemcpy(d_outfield, field, memorySize, hipMemcpyHostToDevice); return 0; } int main(int argc, char** argv) { if (loadFile(argv[1])) { unloadField(); return 1; } if (initGL(&argc, argv)) { unloadField(); return 1; } deinitGL(); unloadField(); }
bf6372822e2fd1deb74756b7c252686ca94e13d4.cu
#include <stdio.h> #include "cuda_globals.cuh" #include "gl.cuh" #include "const.cuh" #include "globals.cuh" static char* field = NULL; static char* getFieldPtrAt(int x, int y) { if (x > width || y > height) { return NULL; } return field + (x + (y * width)); } static void unloadField() { if (d_outfield) { cudaFree(d_outfield); d_outfield = NULL; } if (d_field) { cudaFree(d_field); d_field = NULL; } if (field) { free(field); field = NULL; } } static int loadFile(const char* fileName) { unloadField(); int fileWidth, fileHeight; FILE* fd = fopen(fileName, "rb"); if (!fd) { return 1; } fscanf(fd, "%d %d", &fileWidth, &fileHeight); printf("Got file dimensions %d / %d\n", fileWidth, fileHeight); width = fileWidth + 2; height = fileHeight + 2; if (width % threadsPerBlock.x) { width += threadsPerBlock.x - (width % threadsPerBlock.x); } if (height % threadsPerBlock.y) { height += threadsPerBlock.y - (height % threadsPerBlock.y); } numBlocks.x = width / threadsPerBlock.x; numBlocks.y = height / threadsPerBlock.y; size_t memorySize = (size_t)(width * height); field = (char*)malloc(memorySize); memset(field, CELL_EMPTY, memorySize); int charIn; char charOut; int x = 0; int y = 0; do { charIn = fgetc(fd); if (charIn < 0) { break; } if (charIn == '\r' || charIn == '\n') { continue; } switch (charIn) { case FILE_CELL_CONDUCTOR: charOut = CELL_CONDUCTOR; break; case FILE_CELL_ELECTRON_HEAD: charOut = CELL_ELECTRON_HEAD; break; case FILE_CELL_ELECTRON_TAIL: charOut = CELL_ELECTRON_TAIL; break; default: charOut = CELL_EMPTY; break; } *getFieldPtrAt(x + 1, y + 1) = charOut; if (++x >= fileWidth) { y++; x = 0; } } while (!feof(fd)); printf("File loaded: %s\n", fileName); fclose(fd); cudaMalloc(&d_field, memorySize); cudaMalloc(&d_outfield, memorySize); cudaMemcpy(d_field, field, memorySize, cudaMemcpyHostToDevice); cudaMemcpy(d_outfield, field, memorySize, cudaMemcpyHostToDevice); return 0; } int main(int argc, char** argv) { if (loadFile(argv[1])) { unloadField(); return 1; } if (initGL(&argc, argv)) { unloadField(); return 1; } deinitGL(); unloadField(); }
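In this wireworld pair the kernels live in other translation units, so the only changes hipify makes are the include rename ("globals.cuh" becomes "globals_hip.cuh") and the host runtime API: cudaMalloc, cudaMemcpy, cudaFree and the cudaMemcpyHostToDevice constant become hipMalloc, hipMemcpy, hipFree and hipMemcpyHostToDevice. Below is a minimal sketch of that host-side mapping with hypothetical buffer names; the HIP equivalents are noted in comments.

#include <cuda_runtime.h>
#include <cstring>

int main() {
  const size_t memorySize = 64 * 64;  // hypothetical field size
  char* h_field = new char[memorySize];
  std::memset(h_field, 0, memorySize);

  char* d_field = nullptr;
  cudaMalloc(&d_field, memorySize);                // hipify: hipMalloc(&d_field, memorySize)
  cudaMemcpy(d_field, h_field, memorySize,
             cudaMemcpyHostToDevice);              // hipify: hipMemcpy(..., hipMemcpyHostToDevice)

  // ... kernel launches would go here ...

  cudaMemcpy(h_field, d_field, memorySize,
             cudaMemcpyDeviceToHost);              // hipify: hipMemcpy(..., hipMemcpyDeviceToHost)
  cudaFree(d_field);                               // hipify: hipFree(d_field)
  delete[] h_field;
  return 0;
}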
290984571881f5e9e8e477f2a95db9e5663e4633.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, const float * const *dAarray, magma_int_t ldda, float **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Purpose ======= ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. ALPHA (input) COMPLEX REAL The scalar alpha. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX REAL array, dimension (LDDA,N) The m by n matrices dAarray[i]. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX REAL array, dimension (LDDB,N) The m by n matrices dBarray[i]. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); hipLaunchKernelGGL(( sgeadd_batched_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dAarray, ldda, dBarray, lddb ); }
290984571881f5e9e8e477f2a95db9e5663e4633.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* ===================================================================== Batches slacpy of multiple arrays; y-dimension of grid is different arrays, x-dimension of grid is blocks for each array. Matrix is m x n, and is divided into block rows, each NB x n. Each CUDA block has NB threads to handle one block row. Each thread adds one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. TODO. Block in both directions, for large matrices. E.g., each block does 64x64 tile, instead of 64xN tile. */ __global__ void sgeadd_batched_kernel( int m, int n, float alpha, const float * const *dAarray, int ldda, float **dBarray, int lddb ) { // dA and dB iterate across row i const float *dA = dAarray[ blockIdx.y ]; float *dB = dBarray[ blockIdx.y ]; int i = blockIdx.x*blockDim.x + threadIdx.x; if ( i < m ) { dA += i; dB += i; const float *dAend = dA + n*ldda; while( dA < dAend ) { *dB = alpha*(*dA) + (*dB); dA += ldda; dB += lddb; } } } /* ===================================================================== */ extern "C" void magmablas_sgeadd_batched( magma_int_t m, magma_int_t n, float alpha, const float * const *dAarray, magma_int_t ldda, float **dBarray, magma_int_t lddb, magma_int_t batchCount ) { /* Purpose ======= ZGEADD adds two sets of matrices, dAarray[i] = alpha*dAarray[i] + dBarray[i], for i = 0, ..., batchCount-1. Arguments ========= M (input) INTEGER The number of rows of each matrix dAarray[i]. M >= 0. N (input) INTEGER The number of columns of each matrix dAarray[i]. N >= 0. ALPHA (input) COMPLEX REAL The scalar alpha. dAarray (input) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX REAL array, dimension (LDDA,N) The m by n matrices dAarray[i]. LDDA (input) INTEGER The leading dimension of each array dAarray[i]. LDDA >= max(1,M). dBarray (input/output) array on GPU, dimension(batchCount), of pointers to arrays, with each array a COMPLEX REAL array, dimension (LDDB,N) The m by n matrices dBarray[i]. LDDB (input) INTEGER The leading dimension of each array dBarray[i]. LDDB >= max(1,M). batchCount (input) INTEGER The number of matrices to add; length of dAarray and dBarray. batchCount >= 0. ===================================================================== */ magma_int_t info = 0; if ( m < 0 ) info = -1; else if ( n < 0 ) info = -2; else if ( ldda < max(1,m)) info = -5; else if ( lddb < max(1,m)) info = -7; else if ( batchCount < 0 ) info = -8; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; } if ( m == 0 || n == 0 || batchCount == 0 ) return; dim3 threads( NB ); dim3 grid( (m + NB - 1)/NB, batchCount ); sgeadd_batched_kernel<<< grid, threads, 0, magma_stream >>>( m, n, alpha, dAarray, ldda, dBarray, lddb ); }
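The MAGMA pair above batches many small matrix additions into a single launch by giving the grid a second dimension: blockIdx.y selects which matrix of the batch to work on, while blockIdx.x tiles the rows of that matrix in NB-thread blocks. The fragment below is a stripped-down sketch of that batching pattern, not MAGMA's actual interface; the kernel, the names, and the scale operation are hypothetical.

#include <cuda_runtime.h>

#define NB 64

// Each y-block of the grid handles one array of the batch;
// each thread in an x-block handles one row index i.
__global__ void batchedScaleKernel(float alpha, float** dArray, int m) {
  float* dA = dArray[blockIdx.y];                 // pick the array for this batch entry
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < m) {
    dA[i] *= alpha;
  }
}

// Host wrapper: x-dimension of the grid covers rows, y-dimension covers the batch.
void batchedScale(float alpha, float** dArray_device, int m, int batchCount,
                  cudaStream_t stream) {
  dim3 threads(NB);
  dim3 grid((m + NB - 1) / NB, batchCount);
  batchedScaleKernel<<<grid, threads, 0, stream>>>(alpha, dArray_device, m);
}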
ef77a36050e5a8600f1449ebe01bf3ce8a2f8c32.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <hip/hip_runtime.h> #include "Common.h" #include "optixRaycastingKernels.h" inline int idivCeil( int x, int y ) { return (x + y-1)/y; } __global__ void createRaysOrthoKernel( Ray* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { const int rayx = threadIdx.x + blockIdx.x*blockDim.x; const int rayy = threadIdx.y + blockIdx.y*blockDim.y; if( rayx >= width || rayy >= height ) return; const int idx = rayx + rayy*width; rays[idx].origin = make_float3( x0+rayx*dx, y0+rayy*dy, z ); rays[idx].tmin = 0.0f; rays[idx].dir = make_float3( 0, 0, 1 ); rays[idx].tmax = 1e34f; } // Note: uses left handed coordinate system void createRaysOrthoOnDevice( Ray* rays_device, int width, int height, float3 bbmin, float3 bbmax, float padding ) { const float3 bbspan = bbmax - bbmin; float dx = bbspan.x * (1 + 2*padding) / width; float dy = bbspan.y * (1 + 2*padding) / height; float x0 = bbmin.x - bbspan.x*padding + dx/2; float y0 = bbmin.y - bbspan.y*padding + dy/2; float z = bbmin.z - fmaxf(bbspan.z,1.0f)*.001f; dim3 blockSize( 32, 16 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) ); hipLaunchKernelGGL(( createRaysOrthoKernel), dim3(gridSize),dim3(blockSize), 0, 0, rays_device, width, height, x0, y0, z, dx, dy ); } __global__ void translateRaysKernel( Ray* rays, int count, float3 offset) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) return; rays[idx].origin = rays[idx].origin + offset; } void translateRaysOnDevice( Ray* rays_device, int count, float3 offset) { const int blockSize = 512; const int blockCount = idivCeil(count, blockSize); hipLaunchKernelGGL(( translateRaysKernel), dim3(blockCount),dim3(blockSize), 0, 0, rays_device, count, offset ); } __global__ void shadeHitsKernel( float3* image, int count, const Hit* hits ) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) return; const float3 backgroundColor = make_float3( 0.2f, 0.2f, 0.2f ); if ( 
hits[idx].t < 0.0f ) { image[idx] = backgroundColor; } else { image[idx] = 0.5f*hits[idx].geom_normal + make_float3( 0.5f, 0.5f, 0.5f ); } } void shadeHitsOnDevice( float3* image_device, int count, const Hit* hits_device ) { const int blockSize = 512; const int blockCount = idivCeil(count, blockSize); hipLaunchKernelGGL(( shadeHitsKernel), dim3(blockCount),dim3(blockSize), 0, 0, image_device, count, hits_device ); }
ef77a36050e5a8600f1449ebe01bf3ce8a2f8c32.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <cuda_runtime.h> #include "Common.h" #include "optixRaycastingKernels.h" inline int idivCeil( int x, int y ) { return (x + y-1)/y; } __global__ void createRaysOrthoKernel( Ray* rays, int width, int height, float x0, float y0, float z, float dx, float dy ) { const int rayx = threadIdx.x + blockIdx.x*blockDim.x; const int rayy = threadIdx.y + blockIdx.y*blockDim.y; if( rayx >= width || rayy >= height ) return; const int idx = rayx + rayy*width; rays[idx].origin = make_float3( x0+rayx*dx, y0+rayy*dy, z ); rays[idx].tmin = 0.0f; rays[idx].dir = make_float3( 0, 0, 1 ); rays[idx].tmax = 1e34f; } // Note: uses left handed coordinate system void createRaysOrthoOnDevice( Ray* rays_device, int width, int height, float3 bbmin, float3 bbmax, float padding ) { const float3 bbspan = bbmax - bbmin; float dx = bbspan.x * (1 + 2*padding) / width; float dy = bbspan.y * (1 + 2*padding) / height; float x0 = bbmin.x - bbspan.x*padding + dx/2; float y0 = bbmin.y - bbspan.y*padding + dy/2; float z = bbmin.z - fmaxf(bbspan.z,1.0f)*.001f; dim3 blockSize( 32, 16 ); dim3 gridSize( idivCeil( width, blockSize.x ), idivCeil( height, blockSize.y ) ); createRaysOrthoKernel<<<gridSize,blockSize>>>( rays_device, width, height, x0, y0, z, dx, dy ); } __global__ void translateRaysKernel( Ray* rays, int count, float3 offset) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) return; rays[idx].origin = rays[idx].origin + offset; } void translateRaysOnDevice( Ray* rays_device, int count, float3 offset) { const int blockSize = 512; const int blockCount = idivCeil(count, blockSize); translateRaysKernel<<<blockCount,blockSize>>>( rays_device, count, offset ); } __global__ void shadeHitsKernel( float3* image, int count, const Hit* hits ) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if( idx >= count ) return; const float3 backgroundColor = make_float3( 0.2f, 0.2f, 0.2f ); if ( hits[idx].t < 0.0f ) { image[idx] = backgroundColor; } else { image[idx] = 0.5f*hits[idx].geom_normal + make_float3( 0.5f, 0.5f, 0.5f ); } 
} void shadeHitsOnDevice( float3* image_device, int count, const Hit* hits_device ) { const int blockSize = 512; const int blockCount = idivCeil(count, blockSize); shadeHitsKernel<<<blockCount,blockSize>>>( image_device, count, hits_device ); }
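The raycasting pair sizes its two-dimensional launches from the image dimensions with an integer ceiling division and then bounds-checks each thread against width and height, since the last block row and column may overhang the image. A minimal sketch of that geometry follows; the 32x16 block shape is taken from the sample above, while the per-pixel body here is just a placeholder.

#include <cuda_runtime.h>

inline int idivCeil(int x, int y) { return (x + y - 1) / y; }

__global__ void perPixelKernel(float* out, int width, int height) {
  const int x = threadIdx.x + blockIdx.x * blockDim.x;
  const int y = threadIdx.y + blockIdx.y * blockDim.y;
  if (x >= width || y >= height) return;   // guard the overhanging threads
  out[x + y * width] = 0.0f;               // placeholder per-pixel work
}

void launchPerPixel(float* out_device, int width, int height) {
  dim3 blockSize(32, 16);
  dim3 gridSize(idivCeil(width, blockSize.x), idivCeil(height, blockSize.y));
  perPixelKernel<<<gridSize, blockSize>>>(out_device, width, height);
}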
2881886d9540e7e67ee3c68c74f425b6809f99fd.hip
// !!! This is a file automatically generated by hipify!!!
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                           const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    if (M_ == 1) {
      caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
                            bottom_data, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                              this->blobs_[1]->gpu_data(), top_data);
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1.,
                            bottom_data, weight, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1,
                              (Dtype) 1., bias_multiplier_.gpu_data(),
                              this->blobs_[1]->gpu_data(), (Dtype) 1.,
                              top_data);
    }
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    if (M_ == 1) {
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_, K_,
                               (Dtype) 1., (cl_mem) weight, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
                                 bias_multiplier_.cpu_data()[0],
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (cl_mem) top_data, 0);
    } else {
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasTrans,
                               M_, N_, K_, (Dtype) 1., (cl_mem) bottom_data, 0,
                               (cl_mem) weight, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, N_, 1, (Dtype) 1.,
                                 (cl_mem) (bias_multiplier_.gpu_data()), 0,
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (Dtype) 1., (cl_mem) top_data, 0);
    }
#endif  // USE_GREENTEA
  }
}

template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                            const vector<bool>& propagate_down,
                                            const vector<Blob<Dtype>*>& bottom) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1.,
                            top_diff, bottom_data, (Dtype) 1.,
                            this->blobs_[0]->mutable_gpu_diff());
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias
      caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
                            bias_multiplier_.gpu_data(), (Dtype) 1.,
                            this->blobs_[1]->mutable_gpu_diff());
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_,
                            (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(),
                            (Dtype) 0., bottom[0]->mutable_gpu_diff());
    }
#endif  // USE_ROCM
  } else {
#ifdef USE_GREENTEA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans, CblasNoTrans,
                               N_, K_, M_, (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 1.,
                               (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                               0);
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
                               (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (bias_multiplier_.gpu_data()), 0,
                               (Dtype) 1.,
                               (cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
                               0);
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
                               M_, K_, N_, (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                               (Dtype) 0.,
                               (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
    }
#endif  // USE_GREENTEA
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);

}  // namespace caffe
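The two forward paths above (the cuBLAS-style caffe_gpu_* calls and the GreenTea/OpenCL greentea_gpu_* calls) compute the same affine map. As a plain point of reference for what those GEMM/GEMV calls produce, here is a minimal CPU sketch assuming Caffe's usual shapes — bottom is M_ x K_, the weight blob is N_ x K_ stored row-major (hence the CblasTrans on the weight), and top is M_ x N_. The function name and layout below are illustrative, not part of the layer.

// Reference-only sketch: top = bottom * W^T + 1 * b^T, all buffers row-major.
template <typename Dtype>
void inner_product_forward_ref(const Dtype* bottom, const Dtype* W,
                               const Dtype* bias,  // pass nullptr when bias_term_ is false
                               Dtype* top, int M, int N, int K) {
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      Dtype acc = bias ? bias[n] : Dtype(0);
      for (int k = 0; k < K; ++k)
        acc += bottom[m * K + k] * W[n * K + k];  // W is N x K, so this is bottom * W^T
      top[m * N + n] = acc;
    }
  }
}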
2881886d9540e7e67ee3c68c74f425b6809f99fd.cu
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"

namespace caffe {

template<typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
                                           const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const Dtype* weight = this->blobs_[0]->gpu_data();
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    if (M_ == 1) {
      caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype) 1., weight,
                            bottom_data, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
                              this->blobs_[1]->gpu_data(), top_data);
    } else {
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, N_, K_, (Dtype) 1.,
                            bottom_data, weight, (Dtype) 0., top_data);
      if (bias_term_)
        caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1,
                              (Dtype) 1., bias_multiplier_.gpu_data(),
                              this->blobs_[1]->gpu_data(), (Dtype) 1.,
                              top_data);
    }
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    if (M_ == 1) {
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasNoTrans, N_, K_,
                               (Dtype) 1., (cl_mem) weight, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_axpy<Dtype>(this->device_->id(), N_,
                                 bias_multiplier_.cpu_data()[0],
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (cl_mem) top_data, 0);
    } else {
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasTrans,
                               M_, N_, K_, (Dtype) 1., (cl_mem) bottom_data, 0,
                               (cl_mem) weight, 0, (Dtype) 0.,
                               (cl_mem) top_data, 0);
      if (bias_term_)
        greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans,
                                 CblasNoTrans, M_, N_, 1, (Dtype) 1.,
                                 (cl_mem) (bias_multiplier_.gpu_data()), 0,
                                 (cl_mem) (this->blobs_[1]->gpu_data()), 0,
                                 (Dtype) 1., (cl_mem) top_data, 0);
    }
#endif  // USE_GREENTEA
  }
}

template<typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
                                            const vector<bool>& propagate_down,
                                            const vector<Blob<Dtype>*>& bottom) {
  if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, N_, K_, M_, (Dtype) 1.,
                            top_diff, bottom_data, (Dtype) 1.,
                            this->blobs_[0]->mutable_gpu_diff());
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias
      caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype) 1., top_diff,
                            bias_multiplier_.gpu_data(), (Dtype) 1.,
                            this->blobs_[1]->mutable_gpu_diff());
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, K_, N_,
                            (Dtype) 1., top_diff, this->blobs_[0]->gpu_data(),
                            (Dtype) 0., bottom[0]->mutable_gpu_diff());
    }
#endif  // USE_CUDA
  } else {
#ifdef USE_GREENTEA
    if (this->param_propagate_down_[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      const Dtype* bottom_data = bottom[0]->gpu_data();
      // Gradient with respect to weight
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasTrans, CblasNoTrans,
                               N_, K_, M_, (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) bottom_data, 0, (Dtype) 1.,
                               (cl_mem) (this->blobs_[0]->mutable_gpu_diff()),
                               0);
    }
    if (bias_term_ && this->param_propagate_down_[1]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bias
      greentea_gpu_gemv<Dtype>(this->device_->id(), CblasTrans, M_, N_,
                               (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (bias_multiplier_.gpu_data()), 0,
                               (Dtype) 1.,
                               (cl_mem) (this->blobs_[1]->mutable_gpu_diff()),
                               0);
    }
    if (propagate_down[0]) {
      const Dtype* top_diff = top[0]->gpu_diff();
      // Gradient with respect to bottom data
      greentea_gpu_gemm<Dtype>(this->device_->id(), CblasNoTrans, CblasNoTrans,
                               M_, K_, N_, (Dtype) 1., (cl_mem) top_diff, 0,
                               (cl_mem) (this->blobs_[0]->gpu_data()), 0,
                               (Dtype) 0.,
                               (cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
    }
#endif  // USE_GREENTEA
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);

}  // namespace caffe
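In the backward pass of both files above, the three BLAS calls accumulate the standard gradients of top = bottom * W^T + 1 * b^T: the weight and bias gradients use beta = 1 (they add into the existing diff blobs), while the bottom gradient uses beta = 0 and overwrites. A compact reference of that math follows; the names are illustrative, not Caffe API.

// Reference-only sketch of the Backward_gpu math, matching the GEMM/GEMV arguments above:
//   dW      += top_diff^T * bottom   (N x K, accumulated)
//   db      += top_diff^T * ones(M)  (N,     accumulated)
//   dbottom  = top_diff  * W         (M x K, overwritten)
template <typename Dtype>
void inner_product_backward_ref(const Dtype* top_diff, const Dtype* bottom,
                                const Dtype* W, Dtype* dW, Dtype* db,
                                Dtype* dbottom, int M, int N, int K) {
  for (int n = 0; n < N; ++n) {
    for (int k = 0; k < K; ++k)
      for (int m = 0; m < M; ++m)
        dW[n * K + k] += top_diff[m * N + n] * bottom[m * K + k];
    if (db != nullptr)
      for (int m = 0; m < M; ++m)
        db[n] += top_diff[m * N + n];
  }
  for (int m = 0; m < M; ++m)
    for (int k = 0; k < K; ++k) {
      Dtype acc = Dtype(0);
      for (int n = 0; n < N; ++n)
        acc += top_diff[m * N + n] * W[n * K + k];
      dbottom[m * K + k] = acc;
    }
}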
2ae2362043ee2e9b0bbae31b607906a56f744e4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Produced by CVXGEN, 2018-04-03 18:09:48 -0400. */ /* CVXGEN is Copyright (C) 2006-2017 Jacob Mattingley, [email protected]. */ /* The code in this file is Copyright (C) 2006-2017 Jacob Mattingley. */ /* CVXGEN, or solvers produced by CVXGEN, cannot be used for commercial */ /* applications without prior written permission from Jacob Mattingley. */ /* Filename: solver.c. */ /* Description: Main solver file. */ #include <stdio.h> #define deref(row,col,dim) row * dim + col typedef struct Params_t { double Hi[15]; } Params; typedef struct Vars_t { double *Ui; /* 15 rows. */ } Vars; typedef struct Workspace_t { double h[15]; double s_inv[15]; double s_inv_z[15]; double b[1]; double q[15]; double rhs[46]; double x[46]; double *s; double *z; double *y; double lhs_aff[46]; double lhs_cc[46]; double buffer[46]; double buffer2[46]; double KKT[90]; double L[45]; double d[46]; double v[46]; double d_inv[46]; double gap; double optval; double ineq_resid_squared; double eq_resid_squared; double block_33[1]; /* Pre-op symbols. */ double quad_640466485248[1]; int converged; } Workspace; typedef struct Settings_t { double resid_tol; double eps; int max_iters; int refine_steps; int better_start; /* Better start obviates the need for s_init and z_init. */ double s_init; double z_init; int verbose; /* Show extra details of the iterative refinement steps. */ int verbose_refinement; int debug; /* For regularization. Minimum value of abs(D_ii) in the kkt D factor. */ double kkt_reg; } Settings; struct solver_scope{ Vars vars; Params params; Workspace work; Settings settings; int id; __device__ solver_scope(int _id){ id = _id; }; __device__ double eval_gap(void) { int i; double gap; gap = 0; for (i = 0; i < 15; i++) gap += work.z[i]*work.s[i]; return gap; } __device__ void set_defaults(void) { settings.resid_tol = 1e-6; settings.eps = 1e-4; settings.max_iters = 25; settings.refine_steps = 1; settings.s_init = 1; settings.z_init = 1; settings.debug = 0; settings.verbose = 0; settings.verbose_refinement = 0; settings.better_start = 1; settings.kkt_reg = 1e-7; } __device__ void setup_pointers(void) { work.y = work.x + 15; work.s = work.x + 16; work.z = work.x + 31; vars.Ui = work.x + 0; } __device__ void setup_indexing(void) { setup_pointers(); } __device__ void set_start(void) { int i; for (i = 0; i < 15; i++) work.x[i] = 0; for (i = 0; i < 1; i++) work.y[i] = 0; for (i = 0; i < 15; i++) work.s[i] = (work.h[i] > 0) ? work.h[i] : settings.s_init; for (i = 0; i < 15; i++) work.z[i] = settings.z_init; } __device__ double eval_objv(void) { int i; double objv; /* Borrow space in work.rhs. */ multbyP(work.rhs, work.x); objv = 0; for (i = 0; i < 15; i++) objv += work.x[i]*work.rhs[i]; objv *= 0.5; for (i = 0; i < 15; i++) objv += work.q[i]*work.x[i]; objv += work.quad_640466485248[0]; return objv; } __device__ void fillrhs_aff(void) { int i; double *r1, *r2, *r3, *r4; r1 = work.rhs; r2 = work.rhs + 15; r3 = work.rhs + 30; r4 = work.rhs + 45; /* r1 = -A^Ty - G^Tz - Px - q. */ multbymAT(r1, work.y); multbymGT(work.buffer, work.z); for (i = 0; i < 15; i++) r1[i] += work.buffer[i]; multbyP(work.buffer, work.x); for (i = 0; i < 15; i++) r1[i] -= work.buffer[i] + work.q[i]; /* r2 = -z. */ for (i = 0; i < 15; i++) r2[i] = -work.z[i]; /* r3 = -Gx - s + h. */ multbymG(r3, work.x); for (i = 0; i < 15; i++) r3[i] += -work.s[i] + work.h[i]; /* r4 = -Ax + b. 
*/ multbymA(r4, work.x); for (i = 0; i < 1; i++) r4[i] += work.b[i]; } __device__ void fillrhs_cc(void) { int i; double *r2; double *ds_aff, *dz_aff; double mu; double alpha; double sigma; double smu; double minval; r2 = work.rhs + 15; ds_aff = work.lhs_aff + 15; dz_aff = work.lhs_aff + 30; mu = 0; for (i = 0; i < 15; i++) mu += work.s[i]*work.z[i]; /* Don't finish calculating mu quite yet. */ /* Find min(min(ds./s), min(dz./z)). */ minval = 0; for (i = 0; i < 15; i++) if (ds_aff[i] < minval*work.s[i]) minval = ds_aff[i]/work.s[i]; for (i = 0; i < 15; i++) if (dz_aff[i] < minval*work.z[i]) minval = dz_aff[i]/work.z[i]; /* Find alpha. */ if (-1 < minval) alpha = 1; else alpha = -1/minval; sigma = 0; for (i = 0; i < 15; i++) sigma += (work.s[i] + alpha*ds_aff[i])* (work.z[i] + alpha*dz_aff[i]); sigma /= mu; sigma = sigma*sigma*sigma; /* Finish calculating mu now. */ mu *= 0.06666666666666667; smu = sigma*mu; /* Fill-in the rhs. */ for (i = 0; i < 15; i++) work.rhs[i] = 0; for (i = 30; i < 46; i++) work.rhs[i] = 0; for (i = 0; i < 15; i++) r2[i] = work.s_inv[i]*(smu - ds_aff[i]*dz_aff[i]); } __device__ void refine(double *target, double *var) { int i, j; double *residual = work.buffer; double norm2; double *new_var = work.buffer2; for (j = 0; j < settings.refine_steps; j++) { norm2 = 0; matrix_multiply(residual, var); for (i = 0; i < 46; i++) { residual[i] = residual[i] - target[i]; norm2 += residual[i]*residual[i]; } #ifndef ZERO_LIBRARY_MODE if (settings.verbose_refinement) { if (j == 0) printf("Initial residual before refinement has norm squared %.6g.\n", norm2); else printf("After refinement we get squared norm %.6g.\n", norm2); } #endif /* Solve to find new_var = KKT \ (target - A*var). */ ldl_solve(residual, new_var); /* Update var += new_var, or var += KKT \ (target - A*var). */ for (i = 0; i < 46; i++) { var[i] -= new_var[i]; } } #ifndef ZERO_LIBRARY_MODE if (settings.verbose_refinement) { /* Check the residual once more, but only if we're reporting it, since */ /* it's expensive. */ norm2 = 0; matrix_multiply(residual, var); for (i = 0; i < 46; i++) { residual[i] = residual[i] - target[i]; norm2 += residual[i]*residual[i]; } if (j == 0) printf("Initial residual before refinement has norm squared %.6g.\n", norm2); else printf("After refinement we get squared norm %.6g.\n", norm2); } #endif } __device__ double calc_ineq_resid_squared(void) { /* Calculates the norm ||-Gx - s + h||. */ double norm2_squared; int i; /* Find -Gx. */ multbymG(work.buffer, work.x); /* Add -s + h. */ for (i = 0; i < 15; i++) work.buffer[i] += -work.s[i] + work.h[i]; /* Now find the squared norm. */ norm2_squared = 0; for (i = 0; i < 15; i++) norm2_squared += work.buffer[i]*work.buffer[i]; return norm2_squared; } __device__ double calc_eq_resid_squared(void) { /* Calculates the norm ||-Ax + b||. */ double norm2_squared; int i; /* Find -Ax. */ multbymA(work.buffer, work.x); /* Add +b. */ for (i = 0; i < 1; i++) work.buffer[i] += work.b[i]; /* Now find the squared norm. */ norm2_squared = 0; for (i = 0; i < 1; i++) norm2_squared += work.buffer[i]*work.buffer[i]; return norm2_squared; } __device__ void better_start(void) { /* Calculates a better starting point, using a similar approach to CVXOPT. */ /* Not yet speed optimized. */ int i; double *x, *s, *z, *y; double alpha; work.block_33[0] = -1; /* Make sure sinvz is 1 to make hijacked KKT system ok. */ for (i = 0; i < 15; i++) work.s_inv_z[i] = 1; fill_KKT(); ldl_factor(); fillrhs_start(); /* Borrow work.lhs_aff for the solution. 
*/ ldl_solve(work.rhs, work.lhs_aff); /* Don't do any refinement for now. Precision doesn't matter too much. */ x = work.lhs_aff; s = work.lhs_aff + 15; z = work.lhs_aff + 30; y = work.lhs_aff + 45; /* Just set x and y as is. */ for (i = 0; i < 15; i++) work.x[i] = x[i]; for (i = 0; i < 1; i++) work.y[i] = y[i]; /* Now complete the initialization. Start with s. */ /* Must have alpha > max(z). */ alpha = -1e99; for (i = 0; i < 15; i++) if (alpha < z[i]) alpha = z[i]; if (alpha < 0) { for (i = 0; i < 15; i++) work.s[i] = -z[i]; } else { alpha += 1; for (i = 0; i < 15; i++) work.s[i] = -z[i] + alpha; } /* Now initialize z. */ /* Now must have alpha > max(-z). */ alpha = -1e99; for (i = 0; i < 15; i++) if (alpha < -z[i]) alpha = -z[i]; if (alpha < 0) { for (i = 0; i < 15; i++) work.z[i] = z[i]; } else { alpha += 1; for (i = 0; i < 15; i++) work.z[i] = z[i] + alpha; } } __device__ void fillrhs_start(void) { /* Fill rhs with (-q, 0, h, b). */ int i; double *r1, *r2, *r3, *r4; r1 = work.rhs; r2 = work.rhs + 15; r3 = work.rhs + 30; r4 = work.rhs + 45; for (i = 0; i < 15; i++) r1[i] = -work.q[i]; for (i = 0; i < 15; i++) r2[i] = 0; for (i = 0; i < 15; i++) r3[i] = work.h[i]; for (i = 0; i < 1; i++) r4[i] = work.b[i]; } __device__ long solve(void) { int i; int iter; double *dx, *ds, *dy, *dz; double minval; double alpha; work.converged = 0; setup_pointers(); pre_ops(); #ifndef ZERO_LIBRARY_MODE if (settings.verbose) printf("iter objv gap |Ax-b| |Gx+s-h| step\n"); #endif fillq(); fillh(); fillb(); if (settings.better_start) better_start(); else set_start(); for (iter = 0; iter < settings.max_iters; iter++) { for (i = 0; i < 15; i++) { work.s_inv[i] = 1.0 / work.s[i]; work.s_inv_z[i] = work.s_inv[i]*work.z[i]; } work.block_33[0] = 0; fill_KKT(); ldl_factor(); /* Affine scaling directions. */ fillrhs_aff(); ldl_solve(work.rhs, work.lhs_aff); refine(work.rhs, work.lhs_aff); /* Centering plus corrector directions. */ fillrhs_cc(); ldl_solve(work.rhs, work.lhs_cc); refine(work.rhs, work.lhs_cc); /* Add the two together and store in aff. */ for (i = 0; i < 46; i++) work.lhs_aff[i] += work.lhs_cc[i]; /* Rename aff to reflect its new meaning. */ dx = work.lhs_aff; ds = work.lhs_aff + 15; dz = work.lhs_aff + 30; dy = work.lhs_aff + 45; /* Find min(min(ds./s), min(dz./z)). */ minval = 0; for (i = 0; i < 15; i++) if (ds[i] < minval*work.s[i]) minval = ds[i]/work.s[i]; for (i = 0; i < 15; i++) if (dz[i] < minval*work.z[i]) minval = dz[i]/work.z[i]; /* Find alpha. */ if (-0.99 < minval) alpha = 1; else alpha = -0.99/minval; /* Update the primal and dual variables. */ for (i = 0; i < 15; i++) work.x[i] += alpha*dx[i]; for (i = 0; i < 15; i++) work.s[i] += alpha*ds[i]; for (i = 0; i < 15; i++) work.z[i] += alpha*dz[i]; for (i = 0; i < 1; i++) work.y[i] += alpha*dy[i]; work.gap = eval_gap(); work.eq_resid_squared = calc_eq_resid_squared(); work.ineq_resid_squared = calc_ineq_resid_squared(); #ifndef ZERO_LIBRARY_MODE if (settings.verbose) { work.optval = eval_objv(); printf("%3d %10.3e %9.2e %9.2e %9.2e % 6.4f\n", iter+1, work.optval, work.gap, sqrt(work.eq_resid_squared), sqrt(work.ineq_resid_squared), alpha); } #endif /* Test termination conditions. Requires optimality, and satisfied */ /* constraints. 
*/ if ( (work.gap < settings.eps) && (work.eq_resid_squared <= settings.resid_tol*settings.resid_tol) && (work.ineq_resid_squared <= settings.resid_tol*settings.resid_tol) ) { work.converged = 1; work.optval = eval_objv(); return iter+1; } } return iter; } __device__ void multbymA(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(1)-rhs[1]*(1)-rhs[2]*(1)-rhs[3]*(1)-rhs[4]*(1)-rhs[5]*(1)-rhs[6]*(1)-rhs[7]*(1)-rhs[8]*(1)-rhs[9]*(1)-rhs[10]*(1)-rhs[11]*(1)-rhs[12]*(1)-rhs[13]*(1)-rhs[14]*(1); } __device__ void multbymAT(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(1); lhs[1] = -rhs[0]*(1); lhs[2] = -rhs[0]*(1); lhs[3] = -rhs[0]*(1); lhs[4] = -rhs[0]*(1); lhs[5] = -rhs[0]*(1); lhs[6] = -rhs[0]*(1); lhs[7] = -rhs[0]*(1); lhs[8] = -rhs[0]*(1); lhs[9] = -rhs[0]*(1); lhs[10] = -rhs[0]*(1); lhs[11] = -rhs[0]*(1); lhs[12] = -rhs[0]*(1); lhs[13] = -rhs[0]*(1); lhs[14] = -rhs[0]*(1); } __device__ void multbymG(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(-1); lhs[1] = -rhs[1]*(-1); lhs[2] = -rhs[2]*(-1); lhs[3] = -rhs[3]*(-1); lhs[4] = -rhs[4]*(-1); lhs[5] = -rhs[5]*(-1); lhs[6] = -rhs[6]*(-1); lhs[7] = -rhs[7]*(-1); lhs[8] = -rhs[8]*(-1); lhs[9] = -rhs[9]*(-1); lhs[10] = -rhs[10]*(-1); lhs[11] = -rhs[11]*(-1); lhs[12] = -rhs[12]*(-1); lhs[13] = -rhs[13]*(-1); lhs[14] = -rhs[14]*(-1); } __device__ void multbymGT(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(-1); lhs[1] = -rhs[1]*(-1); lhs[2] = -rhs[2]*(-1); lhs[3] = -rhs[3]*(-1); lhs[4] = -rhs[4]*(-1); lhs[5] = -rhs[5]*(-1); lhs[6] = -rhs[6]*(-1); lhs[7] = -rhs[7]*(-1); lhs[8] = -rhs[8]*(-1); lhs[9] = -rhs[9]*(-1); lhs[10] = -rhs[10]*(-1); lhs[11] = -rhs[11]*(-1); lhs[12] = -rhs[12]*(-1); lhs[13] = -rhs[13]*(-1); lhs[14] = -rhs[14]*(-1); } __device__ void multbyP(double *lhs, double *rhs) { /* TODO use the fact that P is symmetric? */ /* TODO check doubling / half factor etc. */ lhs[0] = rhs[0]*(2); lhs[1] = rhs[1]*(2); lhs[2] = rhs[2]*(2); lhs[3] = rhs[3]*(2); lhs[4] = rhs[4]*(2); lhs[5] = rhs[5]*(2); lhs[6] = rhs[6]*(2); lhs[7] = rhs[7]*(2); lhs[8] = rhs[8]*(2); lhs[9] = rhs[9]*(2); lhs[10] = rhs[10]*(2); lhs[11] = rhs[11]*(2); lhs[12] = rhs[12]*(2); lhs[13] = rhs[13]*(2); lhs[14] = rhs[14]*(2); } __device__ void fillq(void) { work.q[0] = -2*params.Hi[0]; work.q[1] = -2*params.Hi[1]; work.q[2] = -2*params.Hi[2]; work.q[3] = -2*params.Hi[3]; work.q[4] = -2*params.Hi[4]; work.q[5] = -2*params.Hi[5]; work.q[6] = -2*params.Hi[6]; work.q[7] = -2*params.Hi[7]; work.q[8] = -2*params.Hi[8]; work.q[9] = -2*params.Hi[9]; work.q[10] = -2*params.Hi[10]; work.q[11] = -2*params.Hi[11]; work.q[12] = -2*params.Hi[12]; work.q[13] = -2*params.Hi[13]; work.q[14] = -2*params.Hi[14]; } __device__ void fillh(void) { work.h[0] = 0; work.h[1] = 0; work.h[2] = 0; work.h[3] = 0; work.h[4] = 0; work.h[5] = 0; work.h[6] = 0; work.h[7] = 0; work.h[8] = 0; work.h[9] = 0; work.h[10] = 0; work.h[11] = 0; work.h[12] = 0; work.h[13] = 0; work.h[14] = 0; } __device__ void fillb(void) { work.b[0] = 1; } __device__ void pre_ops(void) { work.quad_640466485248[0] = params.Hi[0]*params.Hi[0]+params.Hi[1]*params.Hi[1]+params.Hi[2]*params.Hi[2]+params.Hi[3]*params.Hi[3]+params.Hi[4]*params.Hi[4]+params.Hi[5]*params.Hi[5]+params.Hi[6]*params.Hi[6]+params.Hi[7]*params.Hi[7]+params.Hi[8]*params.Hi[8]+params.Hi[9]*params.Hi[9]+params.Hi[10]*params.Hi[10]+params.Hi[11]*params.Hi[11]+params.Hi[12]*params.Hi[12]+params.Hi[13]*params.Hi[13]+params.Hi[14]*params.Hi[14]; } /* Be sure to place ldl_solve first, so storage schemes are defined by it. 
*/ __device__ void ldl_solve(double *target, double *var) { int i; /* Find var = (L*diag(work.d)*L') \ target, then unpermute. */ /* Answer goes into var. */ /* Forward substitution. */ /* Include permutation as we retrieve from target. Use v so we can unpermute */ /* later. */ work.v[0] = target[15]; work.v[1] = target[16]; work.v[2] = target[17]; work.v[3] = target[18]; work.v[4] = target[19]; work.v[5] = target[20]; work.v[6] = target[21]; work.v[7] = target[22]; work.v[8] = target[23]; work.v[9] = target[24]; work.v[10] = target[25]; work.v[11] = target[26]; work.v[12] = target[27]; work.v[13] = target[28]; work.v[14] = target[29]; work.v[15] = target[30]-work.L[0]*work.v[0]; work.v[16] = target[31]-work.L[1]*work.v[1]; work.v[17] = target[32]-work.L[2]*work.v[2]; work.v[18] = target[33]-work.L[3]*work.v[3]; work.v[19] = target[34]-work.L[4]*work.v[4]; work.v[20] = target[35]-work.L[5]*work.v[5]; work.v[21] = target[36]-work.L[6]*work.v[6]; work.v[22] = target[37]-work.L[7]*work.v[7]; work.v[23] = target[38]-work.L[8]*work.v[8]; work.v[24] = target[39]-work.L[9]*work.v[9]; work.v[25] = target[40]-work.L[10]*work.v[10]; work.v[26] = target[41]-work.L[11]*work.v[11]; work.v[27] = target[42]-work.L[12]*work.v[12]; work.v[28] = target[43]-work.L[13]*work.v[13]; work.v[29] = target[44]-work.L[14]*work.v[14]; work.v[30] = target[0]-work.L[15]*work.v[15]; work.v[31] = target[1]-work.L[16]*work.v[16]; work.v[32] = target[2]-work.L[17]*work.v[17]; work.v[33] = target[3]-work.L[18]*work.v[18]; work.v[34] = target[4]-work.L[19]*work.v[19]; work.v[35] = target[5]-work.L[20]*work.v[20]; work.v[36] = target[6]-work.L[21]*work.v[21]; work.v[37] = target[7]-work.L[22]*work.v[22]; work.v[38] = target[8]-work.L[23]*work.v[23]; work.v[39] = target[9]-work.L[24]*work.v[24]; work.v[40] = target[10]-work.L[25]*work.v[25]; work.v[41] = target[11]-work.L[26]*work.v[26]; work.v[42] = target[12]-work.L[27]*work.v[27]; work.v[43] = target[13]-work.L[28]*work.v[28]; work.v[44] = target[14]-work.L[29]*work.v[29]; work.v[45] = target[45]-work.L[30]*work.v[30]-work.L[31]*work.v[31]-work.L[32]*work.v[32]-work.L[33]*work.v[33]-work.L[34]*work.v[34]-work.L[35]*work.v[35]-work.L[36]*work.v[36]-work.L[37]*work.v[37]-work.L[38]*work.v[38]-work.L[39]*work.v[39]-work.L[40]*work.v[40]-work.L[41]*work.v[41]-work.L[42]*work.v[42]-work.L[43]*work.v[43]-work.L[44]*work.v[44]; /* Diagonal scaling. Assume correctness of work.d_inv. 
*/ for (i = 0; i < 46; i++) work.v[i] *= work.d_inv[i]; /* Back substitution */ work.v[44] -= work.L[44]*work.v[45]; work.v[43] -= work.L[43]*work.v[45]; work.v[42] -= work.L[42]*work.v[45]; work.v[41] -= work.L[41]*work.v[45]; work.v[40] -= work.L[40]*work.v[45]; work.v[39] -= work.L[39]*work.v[45]; work.v[38] -= work.L[38]*work.v[45]; work.v[37] -= work.L[37]*work.v[45]; work.v[36] -= work.L[36]*work.v[45]; work.v[35] -= work.L[35]*work.v[45]; work.v[34] -= work.L[34]*work.v[45]; work.v[33] -= work.L[33]*work.v[45]; work.v[32] -= work.L[32]*work.v[45]; work.v[31] -= work.L[31]*work.v[45]; work.v[30] -= work.L[30]*work.v[45]; work.v[29] -= work.L[29]*work.v[44]; work.v[28] -= work.L[28]*work.v[43]; work.v[27] -= work.L[27]*work.v[42]; work.v[26] -= work.L[26]*work.v[41]; work.v[25] -= work.L[25]*work.v[40]; work.v[24] -= work.L[24]*work.v[39]; work.v[23] -= work.L[23]*work.v[38]; work.v[22] -= work.L[22]*work.v[37]; work.v[21] -= work.L[21]*work.v[36]; work.v[20] -= work.L[20]*work.v[35]; work.v[19] -= work.L[19]*work.v[34]; work.v[18] -= work.L[18]*work.v[33]; work.v[17] -= work.L[17]*work.v[32]; work.v[16] -= work.L[16]*work.v[31]; work.v[15] -= work.L[15]*work.v[30]; work.v[14] -= work.L[14]*work.v[29]; work.v[13] -= work.L[13]*work.v[28]; work.v[12] -= work.L[12]*work.v[27]; work.v[11] -= work.L[11]*work.v[26]; work.v[10] -= work.L[10]*work.v[25]; work.v[9] -= work.L[9]*work.v[24]; work.v[8] -= work.L[8]*work.v[23]; work.v[7] -= work.L[7]*work.v[22]; work.v[6] -= work.L[6]*work.v[21]; work.v[5] -= work.L[5]*work.v[20]; work.v[4] -= work.L[4]*work.v[19]; work.v[3] -= work.L[3]*work.v[18]; work.v[2] -= work.L[2]*work.v[17]; work.v[1] -= work.L[1]*work.v[16]; work.v[0] -= work.L[0]*work.v[15]; /* Unpermute the result, from v to var. */ var[0] = work.v[30]; var[1] = work.v[31]; var[2] = work.v[32]; var[3] = work.v[33]; var[4] = work.v[34]; var[5] = work.v[35]; var[6] = work.v[36]; var[7] = work.v[37]; var[8] = work.v[38]; var[9] = work.v[39]; var[10] = work.v[40]; var[11] = work.v[41]; var[12] = work.v[42]; var[13] = work.v[43]; var[14] = work.v[44]; var[15] = work.v[0]; var[16] = work.v[1]; var[17] = work.v[2]; var[18] = work.v[3]; var[19] = work.v[4]; var[20] = work.v[5]; var[21] = work.v[6]; var[22] = work.v[7]; var[23] = work.v[8]; var[24] = work.v[9]; var[25] = work.v[10]; var[26] = work.v[11]; var[27] = work.v[12]; var[28] = work.v[13]; var[29] = work.v[14]; var[30] = work.v[15]; var[31] = work.v[16]; var[32] = work.v[17]; var[33] = work.v[18]; var[34] = work.v[19]; var[35] = work.v[20]; var[36] = work.v[21]; var[37] = work.v[22]; var[38] = work.v[23]; var[39] = work.v[24]; var[40] = work.v[25]; var[41] = work.v[26]; var[42] = work.v[27]; var[43] = work.v[28]; var[44] = work.v[29]; var[45] = work.v[45]; #ifndef ZERO_LIBRARY_MODE if (settings.debug) { printf("Squared norm for solution is %.8g.\n", check_residual(target, var)); } #endif } __device__ void ldl_factor(void) { work.d[0] = work.KKT[0]; if (work.d[0] < 0) work.d[0] = settings.kkt_reg; else work.d[0] += settings.kkt_reg; work.d_inv[0] = 1/work.d[0]; work.L[0] = work.KKT[1]*work.d_inv[0]; work.v[1] = work.KKT[2]; work.d[1] = work.v[1]; if (work.d[1] < 0) work.d[1] = settings.kkt_reg; else work.d[1] += settings.kkt_reg; work.d_inv[1] = 1/work.d[1]; work.L[1] = (work.KKT[3])*work.d_inv[1]; work.v[2] = work.KKT[4]; work.d[2] = work.v[2]; if (work.d[2] < 0) work.d[2] = settings.kkt_reg; else work.d[2] += settings.kkt_reg; work.d_inv[2] = 1/work.d[2]; work.L[2] = (work.KKT[5])*work.d_inv[2]; work.v[3] = work.KKT[6]; work.d[3] = 
work.v[3]; if (work.d[3] < 0) work.d[3] = settings.kkt_reg; else work.d[3] += settings.kkt_reg; work.d_inv[3] = 1/work.d[3]; work.L[3] = (work.KKT[7])*work.d_inv[3]; work.v[4] = work.KKT[8]; work.d[4] = work.v[4]; if (work.d[4] < 0) work.d[4] = settings.kkt_reg; else work.d[4] += settings.kkt_reg; work.d_inv[4] = 1/work.d[4]; work.L[4] = (work.KKT[9])*work.d_inv[4]; work.v[5] = work.KKT[10]; work.d[5] = work.v[5]; if (work.d[5] < 0) work.d[5] = settings.kkt_reg; else work.d[5] += settings.kkt_reg; work.d_inv[5] = 1/work.d[5]; work.L[5] = (work.KKT[11])*work.d_inv[5]; work.v[6] = work.KKT[12]; work.d[6] = work.v[6]; if (work.d[6] < 0) work.d[6] = settings.kkt_reg; else work.d[6] += settings.kkt_reg; work.d_inv[6] = 1/work.d[6]; work.L[6] = (work.KKT[13])*work.d_inv[6]; work.v[7] = work.KKT[14]; work.d[7] = work.v[7]; if (work.d[7] < 0) work.d[7] = settings.kkt_reg; else work.d[7] += settings.kkt_reg; work.d_inv[7] = 1/work.d[7]; work.L[7] = (work.KKT[15])*work.d_inv[7]; work.v[8] = work.KKT[16]; work.d[8] = work.v[8]; if (work.d[8] < 0) work.d[8] = settings.kkt_reg; else work.d[8] += settings.kkt_reg; work.d_inv[8] = 1/work.d[8]; work.L[8] = (work.KKT[17])*work.d_inv[8]; work.v[9] = work.KKT[18]; work.d[9] = work.v[9]; if (work.d[9] < 0) work.d[9] = settings.kkt_reg; else work.d[9] += settings.kkt_reg; work.d_inv[9] = 1/work.d[9]; work.L[9] = (work.KKT[19])*work.d_inv[9]; work.v[10] = work.KKT[20]; work.d[10] = work.v[10]; if (work.d[10] < 0) work.d[10] = settings.kkt_reg; else work.d[10] += settings.kkt_reg; work.d_inv[10] = 1/work.d[10]; work.L[10] = (work.KKT[21])*work.d_inv[10]; work.v[11] = work.KKT[22]; work.d[11] = work.v[11]; if (work.d[11] < 0) work.d[11] = settings.kkt_reg; else work.d[11] += settings.kkt_reg; work.d_inv[11] = 1/work.d[11]; work.L[11] = (work.KKT[23])*work.d_inv[11]; work.v[12] = work.KKT[24]; work.d[12] = work.v[12]; if (work.d[12] < 0) work.d[12] = settings.kkt_reg; else work.d[12] += settings.kkt_reg; work.d_inv[12] = 1/work.d[12]; work.L[12] = (work.KKT[25])*work.d_inv[12]; work.v[13] = work.KKT[26]; work.d[13] = work.v[13]; if (work.d[13] < 0) work.d[13] = settings.kkt_reg; else work.d[13] += settings.kkt_reg; work.d_inv[13] = 1/work.d[13]; work.L[13] = (work.KKT[27])*work.d_inv[13]; work.v[14] = work.KKT[28]; work.d[14] = work.v[14]; if (work.d[14] < 0) work.d[14] = settings.kkt_reg; else work.d[14] += settings.kkt_reg; work.d_inv[14] = 1/work.d[14]; work.L[14] = (work.KKT[29])*work.d_inv[14]; work.v[0] = work.L[0]*work.d[0]; work.v[15] = work.KKT[30]-work.L[0]*work.v[0]; work.d[15] = work.v[15]; if (work.d[15] > 0) work.d[15] = -settings.kkt_reg; else work.d[15] -= settings.kkt_reg; work.d_inv[15] = 1/work.d[15]; work.L[15] = (work.KKT[31])*work.d_inv[15]; work.v[1] = work.L[1]*work.d[1]; work.v[16] = work.KKT[32]-work.L[1]*work.v[1]; work.d[16] = work.v[16]; if (work.d[16] > 0) work.d[16] = -settings.kkt_reg; else work.d[16] -= settings.kkt_reg; work.d_inv[16] = 1/work.d[16]; work.L[16] = (work.KKT[33])*work.d_inv[16]; work.v[2] = work.L[2]*work.d[2]; work.v[17] = work.KKT[34]-work.L[2]*work.v[2]; work.d[17] = work.v[17]; if (work.d[17] > 0) work.d[17] = -settings.kkt_reg; else work.d[17] -= settings.kkt_reg; work.d_inv[17] = 1/work.d[17]; work.L[17] = (work.KKT[35])*work.d_inv[17]; work.v[3] = work.L[3]*work.d[3]; work.v[18] = work.KKT[36]-work.L[3]*work.v[3]; work.d[18] = work.v[18]; if (work.d[18] > 0) work.d[18] = -settings.kkt_reg; else work.d[18] -= settings.kkt_reg; work.d_inv[18] = 1/work.d[18]; work.L[18] = (work.KKT[37])*work.d_inv[18]; work.v[4] 
= work.L[4]*work.d[4]; work.v[19] = work.KKT[38]-work.L[4]*work.v[4]; work.d[19] = work.v[19]; if (work.d[19] > 0) work.d[19] = -settings.kkt_reg; else work.d[19] -= settings.kkt_reg; work.d_inv[19] = 1/work.d[19]; work.L[19] = (work.KKT[39])*work.d_inv[19]; work.v[5] = work.L[5]*work.d[5]; work.v[20] = work.KKT[40]-work.L[5]*work.v[5]; work.d[20] = work.v[20]; if (work.d[20] > 0) work.d[20] = -settings.kkt_reg; else work.d[20] -= settings.kkt_reg; work.d_inv[20] = 1/work.d[20]; work.L[20] = (work.KKT[41])*work.d_inv[20]; work.v[6] = work.L[6]*work.d[6]; work.v[21] = work.KKT[42]-work.L[6]*work.v[6]; work.d[21] = work.v[21]; if (work.d[21] > 0) work.d[21] = -settings.kkt_reg; else work.d[21] -= settings.kkt_reg; work.d_inv[21] = 1/work.d[21]; work.L[21] = (work.KKT[43])*work.d_inv[21]; work.v[7] = work.L[7]*work.d[7]; work.v[22] = work.KKT[44]-work.L[7]*work.v[7]; work.d[22] = work.v[22]; if (work.d[22] > 0) work.d[22] = -settings.kkt_reg; else work.d[22] -= settings.kkt_reg; work.d_inv[22] = 1/work.d[22]; work.L[22] = (work.KKT[45])*work.d_inv[22]; work.v[8] = work.L[8]*work.d[8]; work.v[23] = work.KKT[46]-work.L[8]*work.v[8]; work.d[23] = work.v[23]; if (work.d[23] > 0) work.d[23] = -settings.kkt_reg; else work.d[23] -= settings.kkt_reg; work.d_inv[23] = 1/work.d[23]; work.L[23] = (work.KKT[47])*work.d_inv[23]; work.v[9] = work.L[9]*work.d[9]; work.v[24] = work.KKT[48]-work.L[9]*work.v[9]; work.d[24] = work.v[24]; if (work.d[24] > 0) work.d[24] = -settings.kkt_reg; else work.d[24] -= settings.kkt_reg; work.d_inv[24] = 1/work.d[24]; work.L[24] = (work.KKT[49])*work.d_inv[24]; work.v[10] = work.L[10]*work.d[10]; work.v[25] = work.KKT[50]-work.L[10]*work.v[10]; work.d[25] = work.v[25]; if (work.d[25] > 0) work.d[25] = -settings.kkt_reg; else work.d[25] -= settings.kkt_reg; work.d_inv[25] = 1/work.d[25]; work.L[25] = (work.KKT[51])*work.d_inv[25]; work.v[11] = work.L[11]*work.d[11]; work.v[26] = work.KKT[52]-work.L[11]*work.v[11]; work.d[26] = work.v[26]; if (work.d[26] > 0) work.d[26] = -settings.kkt_reg; else work.d[26] -= settings.kkt_reg; work.d_inv[26] = 1/work.d[26]; work.L[26] = (work.KKT[53])*work.d_inv[26]; work.v[12] = work.L[12]*work.d[12]; work.v[27] = work.KKT[54]-work.L[12]*work.v[12]; work.d[27] = work.v[27]; if (work.d[27] > 0) work.d[27] = -settings.kkt_reg; else work.d[27] -= settings.kkt_reg; work.d_inv[27] = 1/work.d[27]; work.L[27] = (work.KKT[55])*work.d_inv[27]; work.v[13] = work.L[13]*work.d[13]; work.v[28] = work.KKT[56]-work.L[13]*work.v[13]; work.d[28] = work.v[28]; if (work.d[28] > 0) work.d[28] = -settings.kkt_reg; else work.d[28] -= settings.kkt_reg; work.d_inv[28] = 1/work.d[28]; work.L[28] = (work.KKT[57])*work.d_inv[28]; work.v[14] = work.L[14]*work.d[14]; work.v[29] = work.KKT[58]-work.L[14]*work.v[14]; work.d[29] = work.v[29]; if (work.d[29] > 0) work.d[29] = -settings.kkt_reg; else work.d[29] -= settings.kkt_reg; work.d_inv[29] = 1/work.d[29]; work.L[29] = (work.KKT[59])*work.d_inv[29]; work.v[15] = work.L[15]*work.d[15]; work.v[30] = work.KKT[60]-work.L[15]*work.v[15]; work.d[30] = work.v[30]; if (work.d[30] < 0) work.d[30] = settings.kkt_reg; else work.d[30] += settings.kkt_reg; work.d_inv[30] = 1/work.d[30]; work.L[30] = (work.KKT[61])*work.d_inv[30]; work.v[16] = work.L[16]*work.d[16]; work.v[31] = work.KKT[62]-work.L[16]*work.v[16]; work.d[31] = work.v[31]; if (work.d[31] < 0) work.d[31] = settings.kkt_reg; else work.d[31] += settings.kkt_reg; work.d_inv[31] = 1/work.d[31]; work.L[31] = (work.KKT[63])*work.d_inv[31]; work.v[17] = 
work.L[17]*work.d[17]; work.v[32] = work.KKT[64]-work.L[17]*work.v[17]; work.d[32] = work.v[32]; if (work.d[32] < 0) work.d[32] = settings.kkt_reg; else work.d[32] += settings.kkt_reg; work.d_inv[32] = 1/work.d[32]; work.L[32] = (work.KKT[65])*work.d_inv[32]; work.v[18] = work.L[18]*work.d[18]; work.v[33] = work.KKT[66]-work.L[18]*work.v[18]; work.d[33] = work.v[33]; if (work.d[33] < 0) work.d[33] = settings.kkt_reg; else work.d[33] += settings.kkt_reg; work.d_inv[33] = 1/work.d[33]; work.L[33] = (work.KKT[67])*work.d_inv[33]; work.v[19] = work.L[19]*work.d[19]; work.v[34] = work.KKT[68]-work.L[19]*work.v[19]; work.d[34] = work.v[34]; if (work.d[34] < 0) work.d[34] = settings.kkt_reg; else work.d[34] += settings.kkt_reg; work.d_inv[34] = 1/work.d[34]; work.L[34] = (work.KKT[69])*work.d_inv[34]; work.v[20] = work.L[20]*work.d[20]; work.v[35] = work.KKT[70]-work.L[20]*work.v[20]; work.d[35] = work.v[35]; if (work.d[35] < 0) work.d[35] = settings.kkt_reg; else work.d[35] += settings.kkt_reg; work.d_inv[35] = 1/work.d[35]; work.L[35] = (work.KKT[71])*work.d_inv[35]; work.v[21] = work.L[21]*work.d[21]; work.v[36] = work.KKT[72]-work.L[21]*work.v[21]; work.d[36] = work.v[36]; if (work.d[36] < 0) work.d[36] = settings.kkt_reg; else work.d[36] += settings.kkt_reg; work.d_inv[36] = 1/work.d[36]; work.L[36] = (work.KKT[73])*work.d_inv[36]; work.v[22] = work.L[22]*work.d[22]; work.v[37] = work.KKT[74]-work.L[22]*work.v[22]; work.d[37] = work.v[37]; if (work.d[37] < 0) work.d[37] = settings.kkt_reg; else work.d[37] += settings.kkt_reg; work.d_inv[37] = 1/work.d[37]; work.L[37] = (work.KKT[75])*work.d_inv[37]; work.v[23] = work.L[23]*work.d[23]; work.v[38] = work.KKT[76]-work.L[23]*work.v[23]; work.d[38] = work.v[38]; if (work.d[38] < 0) work.d[38] = settings.kkt_reg; else work.d[38] += settings.kkt_reg; work.d_inv[38] = 1/work.d[38]; work.L[38] = (work.KKT[77])*work.d_inv[38]; work.v[24] = work.L[24]*work.d[24]; work.v[39] = work.KKT[78]-work.L[24]*work.v[24]; work.d[39] = work.v[39]; if (work.d[39] < 0) work.d[39] = settings.kkt_reg; else work.d[39] += settings.kkt_reg; work.d_inv[39] = 1/work.d[39]; work.L[39] = (work.KKT[79])*work.d_inv[39]; work.v[25] = work.L[25]*work.d[25]; work.v[40] = work.KKT[80]-work.L[25]*work.v[25]; work.d[40] = work.v[40]; if (work.d[40] < 0) work.d[40] = settings.kkt_reg; else work.d[40] += settings.kkt_reg; work.d_inv[40] = 1/work.d[40]; work.L[40] = (work.KKT[81])*work.d_inv[40]; work.v[26] = work.L[26]*work.d[26]; work.v[41] = work.KKT[82]-work.L[26]*work.v[26]; work.d[41] = work.v[41]; if (work.d[41] < 0) work.d[41] = settings.kkt_reg; else work.d[41] += settings.kkt_reg; work.d_inv[41] = 1/work.d[41]; work.L[41] = (work.KKT[83])*work.d_inv[41]; work.v[27] = work.L[27]*work.d[27]; work.v[42] = work.KKT[84]-work.L[27]*work.v[27]; work.d[42] = work.v[42]; if (work.d[42] < 0) work.d[42] = settings.kkt_reg; else work.d[42] += settings.kkt_reg; work.d_inv[42] = 1/work.d[42]; work.L[42] = (work.KKT[85])*work.d_inv[42]; work.v[28] = work.L[28]*work.d[28]; work.v[43] = work.KKT[86]-work.L[28]*work.v[28]; work.d[43] = work.v[43]; if (work.d[43] < 0) work.d[43] = settings.kkt_reg; else work.d[43] += settings.kkt_reg; work.d_inv[43] = 1/work.d[43]; work.L[43] = (work.KKT[87])*work.d_inv[43]; work.v[29] = work.L[29]*work.d[29]; work.v[44] = work.KKT[88]-work.L[29]*work.v[29]; work.d[44] = work.v[44]; if (work.d[44] < 0) work.d[44] = settings.kkt_reg; else work.d[44] += settings.kkt_reg; work.d_inv[44] = 1/work.d[44]; work.L[44] = (work.KKT[89])*work.d_inv[44]; work.v[30] = 
work.L[30]*work.d[30]; work.v[31] = work.L[31]*work.d[31]; work.v[32] = work.L[32]*work.d[32]; work.v[33] = work.L[33]*work.d[33]; work.v[34] = work.L[34]*work.d[34]; work.v[35] = work.L[35]*work.d[35]; work.v[36] = work.L[36]*work.d[36]; work.v[37] = work.L[37]*work.d[37]; work.v[38] = work.L[38]*work.d[38]; work.v[39] = work.L[39]*work.d[39]; work.v[40] = work.L[40]*work.d[40]; work.v[41] = work.L[41]*work.d[41]; work.v[42] = work.L[42]*work.d[42]; work.v[43] = work.L[43]*work.d[43]; work.v[44] = work.L[44]*work.d[44]; work.v[45] = 0-work.L[30]*work.v[30]-work.L[31]*work.v[31]-work.L[32]*work.v[32]-work.L[33]*work.v[33]-work.L[34]*work.v[34]-work.L[35]*work.v[35]-work.L[36]*work.v[36]-work.L[37]*work.v[37]-work.L[38]*work.v[38]-work.L[39]*work.v[39]-work.L[40]*work.v[40]-work.L[41]*work.v[41]-work.L[42]*work.v[42]-work.L[43]*work.v[43]-work.L[44]*work.v[44]; work.d[45] = work.v[45]; if (work.d[45] > 0) work.d[45] = -settings.kkt_reg; else work.d[45] -= settings.kkt_reg; work.d_inv[45] = 1/work.d[45]; #ifndef ZERO_LIBRARY_MODE if (settings.debug) { printf("Squared Frobenius for factorization is %.8g.\n", check_factorization()); } #endif } __device__ double check_factorization(void) { /* Returns the squared Frobenius norm of A - L*D*L'. */ double temp, residual; /* Only check the lower triangle. */ residual = 0; temp = work.KKT[60]-1*work.d[30]*1-work.L[15]*work.d[15]*work.L[15]; residual += temp*temp; temp = work.KKT[62]-1*work.d[31]*1-work.L[16]*work.d[16]*work.L[16]; residual += temp*temp; temp = work.KKT[64]-1*work.d[32]*1-work.L[17]*work.d[17]*work.L[17]; residual += temp*temp; temp = work.KKT[66]-1*work.d[33]*1-work.L[18]*work.d[18]*work.L[18]; residual += temp*temp; temp = work.KKT[68]-1*work.d[34]*1-work.L[19]*work.d[19]*work.L[19]; residual += temp*temp; temp = work.KKT[70]-1*work.d[35]*1-work.L[20]*work.d[20]*work.L[20]; residual += temp*temp; temp = work.KKT[72]-1*work.d[36]*1-work.L[21]*work.d[21]*work.L[21]; residual += temp*temp; temp = work.KKT[74]-1*work.d[37]*1-work.L[22]*work.d[22]*work.L[22]; residual += temp*temp; temp = work.KKT[76]-1*work.d[38]*1-work.L[23]*work.d[23]*work.L[23]; residual += temp*temp; temp = work.KKT[78]-1*work.d[39]*1-work.L[24]*work.d[24]*work.L[24]; residual += temp*temp; temp = work.KKT[80]-1*work.d[40]*1-work.L[25]*work.d[25]*work.L[25]; residual += temp*temp; temp = work.KKT[82]-1*work.d[41]*1-work.L[26]*work.d[26]*work.L[26]; residual += temp*temp; temp = work.KKT[84]-1*work.d[42]*1-work.L[27]*work.d[27]*work.L[27]; residual += temp*temp; temp = work.KKT[86]-1*work.d[43]*1-work.L[28]*work.d[28]*work.L[28]; residual += temp*temp; temp = work.KKT[88]-1*work.d[44]*1-work.L[29]*work.d[29]*work.L[29]; residual += temp*temp; temp = work.KKT[0]-1*work.d[0]*1; residual += temp*temp; temp = work.KKT[2]-1*work.d[1]*1; residual += temp*temp; temp = work.KKT[4]-1*work.d[2]*1; residual += temp*temp; temp = work.KKT[6]-1*work.d[3]*1; residual += temp*temp; temp = work.KKT[8]-1*work.d[4]*1; residual += temp*temp; temp = work.KKT[10]-1*work.d[5]*1; residual += temp*temp; temp = work.KKT[12]-1*work.d[6]*1; residual += temp*temp; temp = work.KKT[14]-1*work.d[7]*1; residual += temp*temp; temp = work.KKT[16]-1*work.d[8]*1; residual += temp*temp; temp = work.KKT[18]-1*work.d[9]*1; residual += temp*temp; temp = work.KKT[20]-1*work.d[10]*1; residual += temp*temp; temp = work.KKT[22]-1*work.d[11]*1; residual += temp*temp; temp = work.KKT[24]-1*work.d[12]*1; residual += temp*temp; temp = work.KKT[26]-1*work.d[13]*1; residual += temp*temp; temp = 
work.KKT[28]-1*work.d[14]*1; residual += temp*temp; temp = work.KKT[1]-work.L[0]*work.d[0]*1; residual += temp*temp; temp = work.KKT[3]-work.L[1]*work.d[1]*1; residual += temp*temp; temp = work.KKT[5]-work.L[2]*work.d[2]*1; residual += temp*temp; temp = work.KKT[7]-work.L[3]*work.d[3]*1; residual += temp*temp; temp = work.KKT[9]-work.L[4]*work.d[4]*1; residual += temp*temp; temp = work.KKT[11]-work.L[5]*work.d[5]*1; residual += temp*temp; temp = work.KKT[13]-work.L[6]*work.d[6]*1; residual += temp*temp; temp = work.KKT[15]-work.L[7]*work.d[7]*1; residual += temp*temp; temp = work.KKT[17]-work.L[8]*work.d[8]*1; residual += temp*temp; temp = work.KKT[19]-work.L[9]*work.d[9]*1; residual += temp*temp; temp = work.KKT[21]-work.L[10]*work.d[10]*1; residual += temp*temp; temp = work.KKT[23]-work.L[11]*work.d[11]*1; residual += temp*temp; temp = work.KKT[25]-work.L[12]*work.d[12]*1; residual += temp*temp; temp = work.KKT[27]-work.L[13]*work.d[13]*1; residual += temp*temp; temp = work.KKT[29]-work.L[14]*work.d[14]*1; residual += temp*temp; temp = work.KKT[30]-work.L[0]*work.d[0]*work.L[0]-1*work.d[15]*1; residual += temp*temp; temp = work.KKT[32]-work.L[1]*work.d[1]*work.L[1]-1*work.d[16]*1; residual += temp*temp; temp = work.KKT[34]-work.L[2]*work.d[2]*work.L[2]-1*work.d[17]*1; residual += temp*temp; temp = work.KKT[36]-work.L[3]*work.d[3]*work.L[3]-1*work.d[18]*1; residual += temp*temp; temp = work.KKT[38]-work.L[4]*work.d[4]*work.L[4]-1*work.d[19]*1; residual += temp*temp; temp = work.KKT[40]-work.L[5]*work.d[5]*work.L[5]-1*work.d[20]*1; residual += temp*temp; temp = work.KKT[42]-work.L[6]*work.d[6]*work.L[6]-1*work.d[21]*1; residual += temp*temp; temp = work.KKT[44]-work.L[7]*work.d[7]*work.L[7]-1*work.d[22]*1; residual += temp*temp; temp = work.KKT[46]-work.L[8]*work.d[8]*work.L[8]-1*work.d[23]*1; residual += temp*temp; temp = work.KKT[48]-work.L[9]*work.d[9]*work.L[9]-1*work.d[24]*1; residual += temp*temp; temp = work.KKT[50]-work.L[10]*work.d[10]*work.L[10]-1*work.d[25]*1; residual += temp*temp; temp = work.KKT[52]-work.L[11]*work.d[11]*work.L[11]-1*work.d[26]*1; residual += temp*temp; temp = work.KKT[54]-work.L[12]*work.d[12]*work.L[12]-1*work.d[27]*1; residual += temp*temp; temp = work.KKT[56]-work.L[13]*work.d[13]*work.L[13]-1*work.d[28]*1; residual += temp*temp; temp = work.KKT[58]-work.L[14]*work.d[14]*work.L[14]-1*work.d[29]*1; residual += temp*temp; temp = work.KKT[31]-1*work.d[15]*work.L[15]; residual += temp*temp; temp = work.KKT[33]-1*work.d[16]*work.L[16]; residual += temp*temp; temp = work.KKT[35]-1*work.d[17]*work.L[17]; residual += temp*temp; temp = work.KKT[37]-1*work.d[18]*work.L[18]; residual += temp*temp; temp = work.KKT[39]-1*work.d[19]*work.L[19]; residual += temp*temp; temp = work.KKT[41]-1*work.d[20]*work.L[20]; residual += temp*temp; temp = work.KKT[43]-1*work.d[21]*work.L[21]; residual += temp*temp; temp = work.KKT[45]-1*work.d[22]*work.L[22]; residual += temp*temp; temp = work.KKT[47]-1*work.d[23]*work.L[23]; residual += temp*temp; temp = work.KKT[49]-1*work.d[24]*work.L[24]; residual += temp*temp; temp = work.KKT[51]-1*work.d[25]*work.L[25]; residual += temp*temp; temp = work.KKT[53]-1*work.d[26]*work.L[26]; residual += temp*temp; temp = work.KKT[55]-1*work.d[27]*work.L[27]; residual += temp*temp; temp = work.KKT[57]-1*work.d[28]*work.L[28]; residual += temp*temp; temp = work.KKT[59]-1*work.d[29]*work.L[29]; residual += temp*temp; temp = work.KKT[61]-work.L[30]*work.d[30]*1; residual += temp*temp; temp = work.KKT[63]-work.L[31]*work.d[31]*1; residual += temp*temp; 
temp = work.KKT[65]-work.L[32]*work.d[32]*1; residual += temp*temp; temp = work.KKT[67]-work.L[33]*work.d[33]*1; residual += temp*temp; temp = work.KKT[69]-work.L[34]*work.d[34]*1; residual += temp*temp; temp = work.KKT[71]-work.L[35]*work.d[35]*1; residual += temp*temp; temp = work.KKT[73]-work.L[36]*work.d[36]*1; residual += temp*temp; temp = work.KKT[75]-work.L[37]*work.d[37]*1; residual += temp*temp; temp = work.KKT[77]-work.L[38]*work.d[38]*1; residual += temp*temp; temp = work.KKT[79]-work.L[39]*work.d[39]*1; residual += temp*temp; temp = work.KKT[81]-work.L[40]*work.d[40]*1; residual += temp*temp; temp = work.KKT[83]-work.L[41]*work.d[41]*1; residual += temp*temp; temp = work.KKT[85]-work.L[42]*work.d[42]*1; residual += temp*temp; temp = work.KKT[87]-work.L[43]*work.d[43]*1; residual += temp*temp; temp = work.KKT[89]-work.L[44]*work.d[44]*1; residual += temp*temp; return residual; } __device__ void matrix_multiply(double *result, double *source) { /* Finds result = A*source. */ result[0] = work.KKT[60]*source[0]+work.KKT[31]*source[30]+work.KKT[61]*source[45]; result[1] = work.KKT[62]*source[1]+work.KKT[33]*source[31]+work.KKT[63]*source[45]; result[2] = work.KKT[64]*source[2]+work.KKT[35]*source[32]+work.KKT[65]*source[45]; result[3] = work.KKT[66]*source[3]+work.KKT[37]*source[33]+work.KKT[67]*source[45]; result[4] = work.KKT[68]*source[4]+work.KKT[39]*source[34]+work.KKT[69]*source[45]; result[5] = work.KKT[70]*source[5]+work.KKT[41]*source[35]+work.KKT[71]*source[45]; result[6] = work.KKT[72]*source[6]+work.KKT[43]*source[36]+work.KKT[73]*source[45]; result[7] = work.KKT[74]*source[7]+work.KKT[45]*source[37]+work.KKT[75]*source[45]; result[8] = work.KKT[76]*source[8]+work.KKT[47]*source[38]+work.KKT[77]*source[45]; result[9] = work.KKT[78]*source[9]+work.KKT[49]*source[39]+work.KKT[79]*source[45]; result[10] = work.KKT[80]*source[10]+work.KKT[51]*source[40]+work.KKT[81]*source[45]; result[11] = work.KKT[82]*source[11]+work.KKT[53]*source[41]+work.KKT[83]*source[45]; result[12] = work.KKT[84]*source[12]+work.KKT[55]*source[42]+work.KKT[85]*source[45]; result[13] = work.KKT[86]*source[13]+work.KKT[57]*source[43]+work.KKT[87]*source[45]; result[14] = work.KKT[88]*source[14]+work.KKT[59]*source[44]+work.KKT[89]*source[45]; result[15] = work.KKT[0]*source[15]+work.KKT[1]*source[30]; result[16] = work.KKT[2]*source[16]+work.KKT[3]*source[31]; result[17] = work.KKT[4]*source[17]+work.KKT[5]*source[32]; result[18] = work.KKT[6]*source[18]+work.KKT[7]*source[33]; result[19] = work.KKT[8]*source[19]+work.KKT[9]*source[34]; result[20] = work.KKT[10]*source[20]+work.KKT[11]*source[35]; result[21] = work.KKT[12]*source[21]+work.KKT[13]*source[36]; result[22] = work.KKT[14]*source[22]+work.KKT[15]*source[37]; result[23] = work.KKT[16]*source[23]+work.KKT[17]*source[38]; result[24] = work.KKT[18]*source[24]+work.KKT[19]*source[39]; result[25] = work.KKT[20]*source[25]+work.KKT[21]*source[40]; result[26] = work.KKT[22]*source[26]+work.KKT[23]*source[41]; result[27] = work.KKT[24]*source[27]+work.KKT[25]*source[42]; result[28] = work.KKT[26]*source[28]+work.KKT[27]*source[43]; result[29] = work.KKT[28]*source[29]+work.KKT[29]*source[44]; result[30] = work.KKT[1]*source[15]+work.KKT[30]*source[30]+work.KKT[31]*source[0]; result[31] = work.KKT[3]*source[16]+work.KKT[32]*source[31]+work.KKT[33]*source[1]; result[32] = work.KKT[5]*source[17]+work.KKT[34]*source[32]+work.KKT[35]*source[2]; result[33] = work.KKT[7]*source[18]+work.KKT[36]*source[33]+work.KKT[37]*source[3]; result[34] = 
work.KKT[9]*source[19]+work.KKT[38]*source[34]+work.KKT[39]*source[4]; result[35] = work.KKT[11]*source[20]+work.KKT[40]*source[35]+work.KKT[41]*source[5]; result[36] = work.KKT[13]*source[21]+work.KKT[42]*source[36]+work.KKT[43]*source[6]; result[37] = work.KKT[15]*source[22]+work.KKT[44]*source[37]+work.KKT[45]*source[7]; result[38] = work.KKT[17]*source[23]+work.KKT[46]*source[38]+work.KKT[47]*source[8]; result[39] = work.KKT[19]*source[24]+work.KKT[48]*source[39]+work.KKT[49]*source[9]; result[40] = work.KKT[21]*source[25]+work.KKT[50]*source[40]+work.KKT[51]*source[10]; result[41] = work.KKT[23]*source[26]+work.KKT[52]*source[41]+work.KKT[53]*source[11]; result[42] = work.KKT[25]*source[27]+work.KKT[54]*source[42]+work.KKT[55]*source[12]; result[43] = work.KKT[27]*source[28]+work.KKT[56]*source[43]+work.KKT[57]*source[13]; result[44] = work.KKT[29]*source[29]+work.KKT[58]*source[44]+work.KKT[59]*source[14]; result[45] = work.KKT[61]*source[0]+work.KKT[63]*source[1]+work.KKT[65]*source[2]+work.KKT[67]*source[3]+work.KKT[69]*source[4]+work.KKT[71]*source[5]+work.KKT[73]*source[6]+work.KKT[75]*source[7]+work.KKT[77]*source[8]+work.KKT[79]*source[9]+work.KKT[81]*source[10]+work.KKT[83]*source[11]+work.KKT[85]*source[12]+work.KKT[87]*source[13]+work.KKT[89]*source[14]; } __device__ double check_residual(double *target, double *multiplicand) { /* Returns the squared 2-norm of lhs - A*rhs. */ /* Reuses v to find the residual. */ int i; double residual; residual = 0; matrix_multiply(work.v, multiplicand); for (i = 0; i < 15; i++) { residual += (target[i] - work.v[i])*(target[i] - work.v[i]); } return residual; } __device__ void fill_KKT(void) { work.KKT[60] = 2; work.KKT[62] = 2; work.KKT[64] = 2; work.KKT[66] = 2; work.KKT[68] = 2; work.KKT[70] = 2; work.KKT[72] = 2; work.KKT[74] = 2; work.KKT[76] = 2; work.KKT[78] = 2; work.KKT[80] = 2; work.KKT[82] = 2; work.KKT[84] = 2; work.KKT[86] = 2; work.KKT[88] = 2; work.KKT[0] = work.s_inv_z[0]; work.KKT[2] = work.s_inv_z[1]; work.KKT[4] = work.s_inv_z[2]; work.KKT[6] = work.s_inv_z[3]; work.KKT[8] = work.s_inv_z[4]; work.KKT[10] = work.s_inv_z[5]; work.KKT[12] = work.s_inv_z[6]; work.KKT[14] = work.s_inv_z[7]; work.KKT[16] = work.s_inv_z[8]; work.KKT[18] = work.s_inv_z[9]; work.KKT[20] = work.s_inv_z[10]; work.KKT[22] = work.s_inv_z[11]; work.KKT[24] = work.s_inv_z[12]; work.KKT[26] = work.s_inv_z[13]; work.KKT[28] = work.s_inv_z[14]; work.KKT[1] = 1; work.KKT[3] = 1; work.KKT[5] = 1; work.KKT[7] = 1; work.KKT[9] = 1; work.KKT[11] = 1; work.KKT[13] = 1; work.KKT[15] = 1; work.KKT[17] = 1; work.KKT[19] = 1; work.KKT[21] = 1; work.KKT[23] = 1; work.KKT[25] = 1; work.KKT[27] = 1; work.KKT[29] = 1; work.KKT[30] = work.block_33[0]; work.KKT[32] = work.block_33[0]; work.KKT[34] = work.block_33[0]; work.KKT[36] = work.block_33[0]; work.KKT[38] = work.block_33[0]; work.KKT[40] = work.block_33[0]; work.KKT[42] = work.block_33[0]; work.KKT[44] = work.block_33[0]; work.KKT[46] = work.block_33[0]; work.KKT[48] = work.block_33[0]; work.KKT[50] = work.block_33[0]; work.KKT[52] = work.block_33[0]; work.KKT[54] = work.block_33[0]; work.KKT[56] = work.block_33[0]; work.KKT[58] = work.block_33[0]; work.KKT[31] = -1; work.KKT[33] = -1; work.KKT[35] = -1; work.KKT[37] = -1; work.KKT[39] = -1; work.KKT[41] = -1; work.KKT[43] = -1; work.KKT[45] = -1; work.KKT[47] = -1; work.KKT[49] = -1; work.KKT[51] = -1; work.KKT[53] = -1; work.KKT[55] = -1; work.KKT[57] = -1; work.KKT[59] = -1; work.KKT[61] = 1; work.KKT[63] = 1; work.KKT[65] = 1; work.KKT[67] = 1; work.KKT[69] = 1; 
work.KKT[71] = 1; work.KKT[73] = 1; work.KKT[75] = 1; work.KKT[77] = 1; work.KKT[79] = 1; work.KKT[81] = 1; work.KKT[83] = 1; work.KKT[85] = 1; work.KKT[87] = 1; work.KKT[89] = 1; } }; __device__ int getGlobalIdx_1D_1D(){ return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D(){ int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __global__ void update_S( double* S, double* DataMatrix, double* Centroids, double ThresholdValue, int numRows, int numFeatures){ extern __shared__ double normBuffer[]; int i = blockIdx.x; int k = blockIdx.y; int f = threadIdx.x; int tid = threadIdx.x; int numCentroids = gridDim.y; // // if(i == 0 && k == 0 && f == 0){ // printf("i: %d, k: %d, f: %d\n", gridDim.x, gridDim.y, blockDim.x); // } normBuffer[f] = (DataMatrix[deref(i, f, numFeatures)] - Centroids[deref(k, f, numFeatures)]) * (DataMatrix[deref(i, f, numFeatures)] - Centroids[deref(k, f, numFeatures)]); __syncthreads(); //this syncthreads is essential //this section of code sums down our stored scalars * features into 1 feature of one centroid for(int step = 1; step < numFeatures; step*=2){ while( (tid+step < numFeatures) ){ int ndx = 2 * step * tid; if( ndx + step < numFeatures){ normBuffer[ndx] += normBuffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); normBuffer[0] = sqrt(normBuffer[0]); // if(i == 5 && k == 3 && f == 0){ // printf("i: %d, k: %d, normBuffer[%d] = %f\n", i, k, f, sqrt(normBuffer[f]) ); // } if(normBuffer[0] == 0){ //do nothing so S[i][k] } else if( normBuffer[0] > ThresholdValue){ S[deref(i,k,numCentroids)] = 0; } else{ S[deref(i,k,numCentroids)] = 1/normBuffer[0]; } } /*Loads a buffer with all our scalar values calulated from S and U and used to multiply with a given centroid K*/ __global__ void load_scalar_buffer(double* ScalarBuffer, double* S, double* U, int numRows, int numCentroids){ int k = blockIdx.x; int tid = threadIdx.x; for(int i = tid; i < numRows; i += blockDim.x){ ScalarBuffer[deref(k, i, numRows)] = S[deref(i, k, numCentroids)] * U[deref(i, k, numCentroids)]; } __syncthreads(); } __global__ void calculate_centroids(double* DataMatrix, double* V, double* ScalarBuffer, int numRows, int numFeatures){ extern __shared__ double localbuffer[]; int k = blockIdx.x; int f = blockIdx.y; int tid = threadIdx.x; // if(k == 0 && f == 0 && tid == 0){ // printf("k: %d, f: %d, i: %d\n", gridDim.x, gridDim.y, blockDim.x); // } //this shared memory buffer represents a stack of scalars * a feature for all rows in my data matrix // used as a storage for summing in the next step for(int i = tid; i < numRows; i += blockDim.x){ localbuffer[i] = ScalarBuffer[deref(k, i, numRows)] * DataMatrix[deref(i,f,numFeatures)]; } __syncthreads(); //this syncthreads is essential //this section of code sums down our stored scalars * features into 1 feature of one centroid for(int step = 1; step < numRows; step*=2){ while( (tid+step < numRows) ){ int ndx = 2 * step * tid; if( ndx + step < numRows){ localbuffer[ndx] += localbuffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); // if(k==2 && tid == 0){ // printf("V[%d][%d] = %f \n", k, f, localbuffer[0]); // } //store in global memory for later use V[deref(k,f,numFeatures)] = localbuffer[0]; } __global__ void find_centroids(double* DataMatrix, double* V, double* ScalarBuffer, int numRows, int numFeatures){ int gtid = getGlobalIdx_2D_1D(); int k = blockIdx.x; int i = threadIdx.x; int 
f = threadIdx.x; // if(k==3 && f < numFeatures){ // printf("V[%d][%d] = %f \n", k, f, V[deref(k,f,numRows)]); // } //Sum working for(int step = 1; step < numRows; step*=2){ while( (i+step < numRows) ){ int ndx = 2 * step * i; if(ndx+step < numRows){ ScalarBuffer[deref(k, ndx, numRows)] += ScalarBuffer[deref(k, ndx+step, numRows)]; } i += blockDim.x; } i = threadIdx.x; __syncthreads(); } __syncthreads(); //if(ScalarBuffer[deref(k, 0, numRows)] != 0.0){ double normalize = (1/ScalarBuffer[deref(k, 0, numRows)]); if(f < numFeatures){ V[deref(k, f, numFeatures)] *= normalize; } //} __syncthreads(); // if(k==3 && f < numFeatures){ // printf("V[%d][%d] = %f, ScalarBuffer: %f, Normalize: %f \n", k, f, V[deref(k,f,numFeatures)], ScalarBuffer[deref(k, 0, numRows)], normalize); // // } __syncthreads(); } __global__ void init_S(double* S, int numCols){ int i = blockIdx.x; int k = threadIdx.x; S[deref(i,k,numCols)] = 1.0; } __global__ void build_h_matrix(double* H, double* DataMatrix, double* S, double* Centroids){ int gtid = getGlobalIdx_2D_1D(); int k = blockIdx.x; int i = blockIdx.y; int f = threadIdx.x; int tid = threadIdx.x; int numCentroids = gridDim.x; int numFeatures = blockDim.x; __shared__ double buffer[50]; //unlikely to have more than 50 features //Get Square Subtracted vectors //part of the norm buffer[f] = (DataMatrix[deref(i,f,numFeatures)] - Centroids[deref(k,f,numFeatures)]) * (DataMatrix[deref(i,f,numFeatures)] - Centroids[deref(k,f,numFeatures)]) ; // if( i == 0 && k == 0 ){ // printf("%d, %d, Centroid[%d]: %f\n", i, k, f, Centroids[deref(k,f,numFeatures)]); // } __syncthreads(); for(int step = 1; step < numFeatures; step*=2){ while( (tid+step < numFeatures) ){ int ndx = 2 * step * tid; if( ndx + step < numFeatures){ buffer[ndx] += buffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); // if( i == 0 && k == 0 && f == 0){ // printf("%d, %d, Buffer[%d]: %f\n", i, k, f, buffer[f]); // } // if( i == 0 && k == 0 && f == 0 ){ // printf("Summed buffer for S[%d][%d]: %f\n", i, k, S[deref(i,k,numCentroids)]); // } if(f == 0){ H[deref(i,k,numCentroids)] = buffer[0] * S[deref(i,k,numCentroids)]; } __syncthreads(); // if( i == 0 && k == 0 && f == 0 ){ // printf("Summed buffer for H[%d][%d]: %f\n", i, k, H[deref(i,k,numCentroids)]); // } } __global__ void update_membership_matrix( double* U_GPU, double* H, double RegParam, int numClusters, int numRows){ /* Get Specific Thread Assignment Data */ int tid = blockIdx.x; if(tid < numRows){ solver_scope solver(tid); solver.set_defaults(); // Set basic algorithm parameters. solver.setup_indexing(); // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // printf("H[%d] = %f;\n", i, H[deref(tid, i, numClusters)]); // } // load one line of the h matrix // calculate the h_tilde in here double multiplicand = (-1/(2*RegParam)); for(int i = 0; i < numClusters; i += blockDim.y){ solver.params.Hi[i] = H[deref(tid,i, numClusters)] * multiplicand; } // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // printf("params.Hi[%d] = %f;\n", i, solver.params.Hi[i]); // } // Solve our problem at high speed! solver.solve(); //use_solution(vars, params, tid); for(int i = 0; i < numClusters; i += blockDim.y){ U_GPU[deref(tid, i, numClusters)] = (double) solver.vars.Ui[i]; } __syncthreads(); // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // if(! isnan(U_GPU[deref(tid, i, numClusters)]) ) // printf("U_GPU[%d][%d] = %f\n", tid, i, U_GPU[deref(tid, i, numClusters)]); // } } }
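The host code that launches these clustering kernels is not part of this file, but each kernel fixes its launch geometry through how it reads blockIdx and threadIdx: update_S expects a (numRows x numCentroids) grid with one thread per feature and numFeatures doubles of dynamic shared memory; calculate_centroids expects a (numCentroids x numFeatures) grid with numRows doubles of shared memory; build_h_matrix expects a (numCentroids x numRows) grid with one thread per feature and at most 50 features (its shared buffer is fixed); and update_membership_matrix runs one CVXGEN solve per data row. The sketch below is an assumption reconstructed from that indexing — the wrapper name, pointer names, and the 256-thread block size for the strided loops are hypothetical, not taken from this file.

// Hypothetical host-side driver inferred from the kernels' indexing above.
// d_X: n x f data matrix, d_V: k x f centroids, d_S/d_U/d_H: n x k, d_scalar: k x n (device memory).
void run_clustering_iteration(double* d_S, double* d_X, double* d_V, double* d_H,
                              double* d_U, double* d_scalar, double threshold,
                              double reg_param, int n, int k, int f) {
  update_S<<<dim3(n, k), f, f * sizeof(double)>>>(d_S, d_X, d_V, threshold, n, f);
  load_scalar_buffer<<<k, 256>>>(d_scalar, d_S, d_U, n, k);
  calculate_centroids<<<dim3(k, f), 256, n * sizeof(double)>>>(d_X, d_V, d_scalar, n, f);
  find_centroids<<<k, 256>>>(d_X, d_V, d_scalar, n, f);           // block size must be >= f
  build_h_matrix<<<dim3(k, n), f>>>(d_H, d_X, d_S, d_V);          // assumes f <= 50 (static shared buffer)
  update_membership_matrix<<<n, 1>>>(d_U, d_H, reg_param, k, n);  // one solver instance per row
  hipDeviceSynchronize();  // same-stream launches already run in order; this only fences the host
}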
2ae2362043ee2e9b0bbae31b607906a56f744e4d.cu
/* Produced by CVXGEN, 2018-04-03 18:09:48 -0400. */ /* CVXGEN is Copyright (C) 2006-2017 Jacob Mattingley, [email protected]. */ /* The code in this file is Copyright (C) 2006-2017 Jacob Mattingley. */ /* CVXGEN, or solvers produced by CVXGEN, cannot be used for commercial */ /* applications without prior written permission from Jacob Mattingley. */ /* Filename: solver.c. */ /* Description: Main solver file. */ #include <stdio.h> #define deref(row,col,dim) row * dim + col typedef struct Params_t { double Hi[15]; } Params; typedef struct Vars_t { double *Ui; /* 15 rows. */ } Vars; typedef struct Workspace_t { double h[15]; double s_inv[15]; double s_inv_z[15]; double b[1]; double q[15]; double rhs[46]; double x[46]; double *s; double *z; double *y; double lhs_aff[46]; double lhs_cc[46]; double buffer[46]; double buffer2[46]; double KKT[90]; double L[45]; double d[46]; double v[46]; double d_inv[46]; double gap; double optval; double ineq_resid_squared; double eq_resid_squared; double block_33[1]; /* Pre-op symbols. */ double quad_640466485248[1]; int converged; } Workspace; typedef struct Settings_t { double resid_tol; double eps; int max_iters; int refine_steps; int better_start; /* Better start obviates the need for s_init and z_init. */ double s_init; double z_init; int verbose; /* Show extra details of the iterative refinement steps. */ int verbose_refinement; int debug; /* For regularization. Minimum value of abs(D_ii) in the kkt D factor. */ double kkt_reg; } Settings; struct solver_scope{ Vars vars; Params params; Workspace work; Settings settings; int id; __device__ solver_scope(int _id){ id = _id; }; __device__ double eval_gap(void) { int i; double gap; gap = 0; for (i = 0; i < 15; i++) gap += work.z[i]*work.s[i]; return gap; } __device__ void set_defaults(void) { settings.resid_tol = 1e-6; settings.eps = 1e-4; settings.max_iters = 25; settings.refine_steps = 1; settings.s_init = 1; settings.z_init = 1; settings.debug = 0; settings.verbose = 0; settings.verbose_refinement = 0; settings.better_start = 1; settings.kkt_reg = 1e-7; } __device__ void setup_pointers(void) { work.y = work.x + 15; work.s = work.x + 16; work.z = work.x + 31; vars.Ui = work.x + 0; } __device__ void setup_indexing(void) { setup_pointers(); } __device__ void set_start(void) { int i; for (i = 0; i < 15; i++) work.x[i] = 0; for (i = 0; i < 1; i++) work.y[i] = 0; for (i = 0; i < 15; i++) work.s[i] = (work.h[i] > 0) ? work.h[i] : settings.s_init; for (i = 0; i < 15; i++) work.z[i] = settings.z_init; } __device__ double eval_objv(void) { int i; double objv; /* Borrow space in work.rhs. */ multbyP(work.rhs, work.x); objv = 0; for (i = 0; i < 15; i++) objv += work.x[i]*work.rhs[i]; objv *= 0.5; for (i = 0; i < 15; i++) objv += work.q[i]*work.x[i]; objv += work.quad_640466485248[0]; return objv; } __device__ void fillrhs_aff(void) { int i; double *r1, *r2, *r3, *r4; r1 = work.rhs; r2 = work.rhs + 15; r3 = work.rhs + 30; r4 = work.rhs + 45; /* r1 = -A^Ty - G^Tz - Px - q. */ multbymAT(r1, work.y); multbymGT(work.buffer, work.z); for (i = 0; i < 15; i++) r1[i] += work.buffer[i]; multbyP(work.buffer, work.x); for (i = 0; i < 15; i++) r1[i] -= work.buffer[i] + work.q[i]; /* r2 = -z. */ for (i = 0; i < 15; i++) r2[i] = -work.z[i]; /* r3 = -Gx - s + h. */ multbymG(r3, work.x); for (i = 0; i < 15; i++) r3[i] += -work.s[i] + work.h[i]; /* r4 = -Ax + b. 
*/ multbymA(r4, work.x); for (i = 0; i < 1; i++) r4[i] += work.b[i]; } __device__ void fillrhs_cc(void) { int i; double *r2; double *ds_aff, *dz_aff; double mu; double alpha; double sigma; double smu; double minval; r2 = work.rhs + 15; ds_aff = work.lhs_aff + 15; dz_aff = work.lhs_aff + 30; mu = 0; for (i = 0; i < 15; i++) mu += work.s[i]*work.z[i]; /* Don't finish calculating mu quite yet. */ /* Find min(min(ds./s), min(dz./z)). */ minval = 0; for (i = 0; i < 15; i++) if (ds_aff[i] < minval*work.s[i]) minval = ds_aff[i]/work.s[i]; for (i = 0; i < 15; i++) if (dz_aff[i] < minval*work.z[i]) minval = dz_aff[i]/work.z[i]; /* Find alpha. */ if (-1 < minval) alpha = 1; else alpha = -1/minval; sigma = 0; for (i = 0; i < 15; i++) sigma += (work.s[i] + alpha*ds_aff[i])* (work.z[i] + alpha*dz_aff[i]); sigma /= mu; sigma = sigma*sigma*sigma; /* Finish calculating mu now. */ mu *= 0.06666666666666667; smu = sigma*mu; /* Fill-in the rhs. */ for (i = 0; i < 15; i++) work.rhs[i] = 0; for (i = 30; i < 46; i++) work.rhs[i] = 0; for (i = 0; i < 15; i++) r2[i] = work.s_inv[i]*(smu - ds_aff[i]*dz_aff[i]); } __device__ void refine(double *target, double *var) { int i, j; double *residual = work.buffer; double norm2; double *new_var = work.buffer2; for (j = 0; j < settings.refine_steps; j++) { norm2 = 0; matrix_multiply(residual, var); for (i = 0; i < 46; i++) { residual[i] = residual[i] - target[i]; norm2 += residual[i]*residual[i]; } #ifndef ZERO_LIBRARY_MODE if (settings.verbose_refinement) { if (j == 0) printf("Initial residual before refinement has norm squared %.6g.\n", norm2); else printf("After refinement we get squared norm %.6g.\n", norm2); } #endif /* Solve to find new_var = KKT \ (target - A*var). */ ldl_solve(residual, new_var); /* Update var += new_var, or var += KKT \ (target - A*var). */ for (i = 0; i < 46; i++) { var[i] -= new_var[i]; } } #ifndef ZERO_LIBRARY_MODE if (settings.verbose_refinement) { /* Check the residual once more, but only if we're reporting it, since */ /* it's expensive. */ norm2 = 0; matrix_multiply(residual, var); for (i = 0; i < 46; i++) { residual[i] = residual[i] - target[i]; norm2 += residual[i]*residual[i]; } if (j == 0) printf("Initial residual before refinement has norm squared %.6g.\n", norm2); else printf("After refinement we get squared norm %.6g.\n", norm2); } #endif } __device__ double calc_ineq_resid_squared(void) { /* Calculates the norm ||-Gx - s + h||. */ double norm2_squared; int i; /* Find -Gx. */ multbymG(work.buffer, work.x); /* Add -s + h. */ for (i = 0; i < 15; i++) work.buffer[i] += -work.s[i] + work.h[i]; /* Now find the squared norm. */ norm2_squared = 0; for (i = 0; i < 15; i++) norm2_squared += work.buffer[i]*work.buffer[i]; return norm2_squared; } __device__ double calc_eq_resid_squared(void) { /* Calculates the norm ||-Ax + b||. */ double norm2_squared; int i; /* Find -Ax. */ multbymA(work.buffer, work.x); /* Add +b. */ for (i = 0; i < 1; i++) work.buffer[i] += work.b[i]; /* Now find the squared norm. */ norm2_squared = 0; for (i = 0; i < 1; i++) norm2_squared += work.buffer[i]*work.buffer[i]; return norm2_squared; } __device__ void better_start(void) { /* Calculates a better starting point, using a similar approach to CVXOPT. */ /* Not yet speed optimized. */ int i; double *x, *s, *z, *y; double alpha; work.block_33[0] = -1; /* Make sure sinvz is 1 to make hijacked KKT system ok. */ for (i = 0; i < 15; i++) work.s_inv_z[i] = 1; fill_KKT(); ldl_factor(); fillrhs_start(); /* Borrow work.lhs_aff for the solution. 
*/ ldl_solve(work.rhs, work.lhs_aff); /* Don't do any refinement for now. Precision doesn't matter too much. */ x = work.lhs_aff; s = work.lhs_aff + 15; z = work.lhs_aff + 30; y = work.lhs_aff + 45; /* Just set x and y as is. */ for (i = 0; i < 15; i++) work.x[i] = x[i]; for (i = 0; i < 1; i++) work.y[i] = y[i]; /* Now complete the initialization. Start with s. */ /* Must have alpha > max(z). */ alpha = -1e99; for (i = 0; i < 15; i++) if (alpha < z[i]) alpha = z[i]; if (alpha < 0) { for (i = 0; i < 15; i++) work.s[i] = -z[i]; } else { alpha += 1; for (i = 0; i < 15; i++) work.s[i] = -z[i] + alpha; } /* Now initialize z. */ /* Now must have alpha > max(-z). */ alpha = -1e99; for (i = 0; i < 15; i++) if (alpha < -z[i]) alpha = -z[i]; if (alpha < 0) { for (i = 0; i < 15; i++) work.z[i] = z[i]; } else { alpha += 1; for (i = 0; i < 15; i++) work.z[i] = z[i] + alpha; } } __device__ void fillrhs_start(void) { /* Fill rhs with (-q, 0, h, b). */ int i; double *r1, *r2, *r3, *r4; r1 = work.rhs; r2 = work.rhs + 15; r3 = work.rhs + 30; r4 = work.rhs + 45; for (i = 0; i < 15; i++) r1[i] = -work.q[i]; for (i = 0; i < 15; i++) r2[i] = 0; for (i = 0; i < 15; i++) r3[i] = work.h[i]; for (i = 0; i < 1; i++) r4[i] = work.b[i]; } __device__ long solve(void) { int i; int iter; double *dx, *ds, *dy, *dz; double minval; double alpha; work.converged = 0; setup_pointers(); pre_ops(); #ifndef ZERO_LIBRARY_MODE if (settings.verbose) printf("iter objv gap |Ax-b| |Gx+s-h| step\n"); #endif fillq(); fillh(); fillb(); if (settings.better_start) better_start(); else set_start(); for (iter = 0; iter < settings.max_iters; iter++) { for (i = 0; i < 15; i++) { work.s_inv[i] = 1.0 / work.s[i]; work.s_inv_z[i] = work.s_inv[i]*work.z[i]; } work.block_33[0] = 0; fill_KKT(); ldl_factor(); /* Affine scaling directions. */ fillrhs_aff(); ldl_solve(work.rhs, work.lhs_aff); refine(work.rhs, work.lhs_aff); /* Centering plus corrector directions. */ fillrhs_cc(); ldl_solve(work.rhs, work.lhs_cc); refine(work.rhs, work.lhs_cc); /* Add the two together and store in aff. */ for (i = 0; i < 46; i++) work.lhs_aff[i] += work.lhs_cc[i]; /* Rename aff to reflect its new meaning. */ dx = work.lhs_aff; ds = work.lhs_aff + 15; dz = work.lhs_aff + 30; dy = work.lhs_aff + 45; /* Find min(min(ds./s), min(dz./z)). */ minval = 0; for (i = 0; i < 15; i++) if (ds[i] < minval*work.s[i]) minval = ds[i]/work.s[i]; for (i = 0; i < 15; i++) if (dz[i] < minval*work.z[i]) minval = dz[i]/work.z[i]; /* Find alpha. */ if (-0.99 < minval) alpha = 1; else alpha = -0.99/minval; /* Update the primal and dual variables. */ for (i = 0; i < 15; i++) work.x[i] += alpha*dx[i]; for (i = 0; i < 15; i++) work.s[i] += alpha*ds[i]; for (i = 0; i < 15; i++) work.z[i] += alpha*dz[i]; for (i = 0; i < 1; i++) work.y[i] += alpha*dy[i]; work.gap = eval_gap(); work.eq_resid_squared = calc_eq_resid_squared(); work.ineq_resid_squared = calc_ineq_resid_squared(); #ifndef ZERO_LIBRARY_MODE if (settings.verbose) { work.optval = eval_objv(); printf("%3d %10.3e %9.2e %9.2e %9.2e % 6.4f\n", iter+1, work.optval, work.gap, sqrt(work.eq_resid_squared), sqrt(work.ineq_resid_squared), alpha); } #endif /* Test termination conditions. Requires optimality, and satisfied */ /* constraints. 
*/ if ( (work.gap < settings.eps) && (work.eq_resid_squared <= settings.resid_tol*settings.resid_tol) && (work.ineq_resid_squared <= settings.resid_tol*settings.resid_tol) ) { work.converged = 1; work.optval = eval_objv(); return iter+1; } } return iter; } __device__ void multbymA(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(1)-rhs[1]*(1)-rhs[2]*(1)-rhs[3]*(1)-rhs[4]*(1)-rhs[5]*(1)-rhs[6]*(1)-rhs[7]*(1)-rhs[8]*(1)-rhs[9]*(1)-rhs[10]*(1)-rhs[11]*(1)-rhs[12]*(1)-rhs[13]*(1)-rhs[14]*(1); } __device__ void multbymAT(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(1); lhs[1] = -rhs[0]*(1); lhs[2] = -rhs[0]*(1); lhs[3] = -rhs[0]*(1); lhs[4] = -rhs[0]*(1); lhs[5] = -rhs[0]*(1); lhs[6] = -rhs[0]*(1); lhs[7] = -rhs[0]*(1); lhs[8] = -rhs[0]*(1); lhs[9] = -rhs[0]*(1); lhs[10] = -rhs[0]*(1); lhs[11] = -rhs[0]*(1); lhs[12] = -rhs[0]*(1); lhs[13] = -rhs[0]*(1); lhs[14] = -rhs[0]*(1); } __device__ void multbymG(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(-1); lhs[1] = -rhs[1]*(-1); lhs[2] = -rhs[2]*(-1); lhs[3] = -rhs[3]*(-1); lhs[4] = -rhs[4]*(-1); lhs[5] = -rhs[5]*(-1); lhs[6] = -rhs[6]*(-1); lhs[7] = -rhs[7]*(-1); lhs[8] = -rhs[8]*(-1); lhs[9] = -rhs[9]*(-1); lhs[10] = -rhs[10]*(-1); lhs[11] = -rhs[11]*(-1); lhs[12] = -rhs[12]*(-1); lhs[13] = -rhs[13]*(-1); lhs[14] = -rhs[14]*(-1); } __device__ void multbymGT(double *lhs, double *rhs) { lhs[0] = -rhs[0]*(-1); lhs[1] = -rhs[1]*(-1); lhs[2] = -rhs[2]*(-1); lhs[3] = -rhs[3]*(-1); lhs[4] = -rhs[4]*(-1); lhs[5] = -rhs[5]*(-1); lhs[6] = -rhs[6]*(-1); lhs[7] = -rhs[7]*(-1); lhs[8] = -rhs[8]*(-1); lhs[9] = -rhs[9]*(-1); lhs[10] = -rhs[10]*(-1); lhs[11] = -rhs[11]*(-1); lhs[12] = -rhs[12]*(-1); lhs[13] = -rhs[13]*(-1); lhs[14] = -rhs[14]*(-1); } __device__ void multbyP(double *lhs, double *rhs) { /* TODO use the fact that P is symmetric? */ /* TODO check doubling / half factor etc. */ lhs[0] = rhs[0]*(2); lhs[1] = rhs[1]*(2); lhs[2] = rhs[2]*(2); lhs[3] = rhs[3]*(2); lhs[4] = rhs[4]*(2); lhs[5] = rhs[5]*(2); lhs[6] = rhs[6]*(2); lhs[7] = rhs[7]*(2); lhs[8] = rhs[8]*(2); lhs[9] = rhs[9]*(2); lhs[10] = rhs[10]*(2); lhs[11] = rhs[11]*(2); lhs[12] = rhs[12]*(2); lhs[13] = rhs[13]*(2); lhs[14] = rhs[14]*(2); } __device__ void fillq(void) { work.q[0] = -2*params.Hi[0]; work.q[1] = -2*params.Hi[1]; work.q[2] = -2*params.Hi[2]; work.q[3] = -2*params.Hi[3]; work.q[4] = -2*params.Hi[4]; work.q[5] = -2*params.Hi[5]; work.q[6] = -2*params.Hi[6]; work.q[7] = -2*params.Hi[7]; work.q[8] = -2*params.Hi[8]; work.q[9] = -2*params.Hi[9]; work.q[10] = -2*params.Hi[10]; work.q[11] = -2*params.Hi[11]; work.q[12] = -2*params.Hi[12]; work.q[13] = -2*params.Hi[13]; work.q[14] = -2*params.Hi[14]; } __device__ void fillh(void) { work.h[0] = 0; work.h[1] = 0; work.h[2] = 0; work.h[3] = 0; work.h[4] = 0; work.h[5] = 0; work.h[6] = 0; work.h[7] = 0; work.h[8] = 0; work.h[9] = 0; work.h[10] = 0; work.h[11] = 0; work.h[12] = 0; work.h[13] = 0; work.h[14] = 0; } __device__ void fillb(void) { work.b[0] = 1; } __device__ void pre_ops(void) { work.quad_640466485248[0] = params.Hi[0]*params.Hi[0]+params.Hi[1]*params.Hi[1]+params.Hi[2]*params.Hi[2]+params.Hi[3]*params.Hi[3]+params.Hi[4]*params.Hi[4]+params.Hi[5]*params.Hi[5]+params.Hi[6]*params.Hi[6]+params.Hi[7]*params.Hi[7]+params.Hi[8]*params.Hi[8]+params.Hi[9]*params.Hi[9]+params.Hi[10]*params.Hi[10]+params.Hi[11]*params.Hi[11]+params.Hi[12]*params.Hi[12]+params.Hi[13]*params.Hi[13]+params.Hi[14]*params.Hi[14]; } /* Be sure to place ldl_solve first, so storage schemes are defined by it. 
*/ __device__ void ldl_solve(double *target, double *var) { int i; /* Find var = (L*diag(work.d)*L') \ target, then unpermute. */ /* Answer goes into var. */ /* Forward substitution. */ /* Include permutation as we retrieve from target. Use v so we can unpermute */ /* later. */ work.v[0] = target[15]; work.v[1] = target[16]; work.v[2] = target[17]; work.v[3] = target[18]; work.v[4] = target[19]; work.v[5] = target[20]; work.v[6] = target[21]; work.v[7] = target[22]; work.v[8] = target[23]; work.v[9] = target[24]; work.v[10] = target[25]; work.v[11] = target[26]; work.v[12] = target[27]; work.v[13] = target[28]; work.v[14] = target[29]; work.v[15] = target[30]-work.L[0]*work.v[0]; work.v[16] = target[31]-work.L[1]*work.v[1]; work.v[17] = target[32]-work.L[2]*work.v[2]; work.v[18] = target[33]-work.L[3]*work.v[3]; work.v[19] = target[34]-work.L[4]*work.v[4]; work.v[20] = target[35]-work.L[5]*work.v[5]; work.v[21] = target[36]-work.L[6]*work.v[6]; work.v[22] = target[37]-work.L[7]*work.v[7]; work.v[23] = target[38]-work.L[8]*work.v[8]; work.v[24] = target[39]-work.L[9]*work.v[9]; work.v[25] = target[40]-work.L[10]*work.v[10]; work.v[26] = target[41]-work.L[11]*work.v[11]; work.v[27] = target[42]-work.L[12]*work.v[12]; work.v[28] = target[43]-work.L[13]*work.v[13]; work.v[29] = target[44]-work.L[14]*work.v[14]; work.v[30] = target[0]-work.L[15]*work.v[15]; work.v[31] = target[1]-work.L[16]*work.v[16]; work.v[32] = target[2]-work.L[17]*work.v[17]; work.v[33] = target[3]-work.L[18]*work.v[18]; work.v[34] = target[4]-work.L[19]*work.v[19]; work.v[35] = target[5]-work.L[20]*work.v[20]; work.v[36] = target[6]-work.L[21]*work.v[21]; work.v[37] = target[7]-work.L[22]*work.v[22]; work.v[38] = target[8]-work.L[23]*work.v[23]; work.v[39] = target[9]-work.L[24]*work.v[24]; work.v[40] = target[10]-work.L[25]*work.v[25]; work.v[41] = target[11]-work.L[26]*work.v[26]; work.v[42] = target[12]-work.L[27]*work.v[27]; work.v[43] = target[13]-work.L[28]*work.v[28]; work.v[44] = target[14]-work.L[29]*work.v[29]; work.v[45] = target[45]-work.L[30]*work.v[30]-work.L[31]*work.v[31]-work.L[32]*work.v[32]-work.L[33]*work.v[33]-work.L[34]*work.v[34]-work.L[35]*work.v[35]-work.L[36]*work.v[36]-work.L[37]*work.v[37]-work.L[38]*work.v[38]-work.L[39]*work.v[39]-work.L[40]*work.v[40]-work.L[41]*work.v[41]-work.L[42]*work.v[42]-work.L[43]*work.v[43]-work.L[44]*work.v[44]; /* Diagonal scaling. Assume correctness of work.d_inv. 
*/ for (i = 0; i < 46; i++) work.v[i] *= work.d_inv[i]; /* Back substitution */ work.v[44] -= work.L[44]*work.v[45]; work.v[43] -= work.L[43]*work.v[45]; work.v[42] -= work.L[42]*work.v[45]; work.v[41] -= work.L[41]*work.v[45]; work.v[40] -= work.L[40]*work.v[45]; work.v[39] -= work.L[39]*work.v[45]; work.v[38] -= work.L[38]*work.v[45]; work.v[37] -= work.L[37]*work.v[45]; work.v[36] -= work.L[36]*work.v[45]; work.v[35] -= work.L[35]*work.v[45]; work.v[34] -= work.L[34]*work.v[45]; work.v[33] -= work.L[33]*work.v[45]; work.v[32] -= work.L[32]*work.v[45]; work.v[31] -= work.L[31]*work.v[45]; work.v[30] -= work.L[30]*work.v[45]; work.v[29] -= work.L[29]*work.v[44]; work.v[28] -= work.L[28]*work.v[43]; work.v[27] -= work.L[27]*work.v[42]; work.v[26] -= work.L[26]*work.v[41]; work.v[25] -= work.L[25]*work.v[40]; work.v[24] -= work.L[24]*work.v[39]; work.v[23] -= work.L[23]*work.v[38]; work.v[22] -= work.L[22]*work.v[37]; work.v[21] -= work.L[21]*work.v[36]; work.v[20] -= work.L[20]*work.v[35]; work.v[19] -= work.L[19]*work.v[34]; work.v[18] -= work.L[18]*work.v[33]; work.v[17] -= work.L[17]*work.v[32]; work.v[16] -= work.L[16]*work.v[31]; work.v[15] -= work.L[15]*work.v[30]; work.v[14] -= work.L[14]*work.v[29]; work.v[13] -= work.L[13]*work.v[28]; work.v[12] -= work.L[12]*work.v[27]; work.v[11] -= work.L[11]*work.v[26]; work.v[10] -= work.L[10]*work.v[25]; work.v[9] -= work.L[9]*work.v[24]; work.v[8] -= work.L[8]*work.v[23]; work.v[7] -= work.L[7]*work.v[22]; work.v[6] -= work.L[6]*work.v[21]; work.v[5] -= work.L[5]*work.v[20]; work.v[4] -= work.L[4]*work.v[19]; work.v[3] -= work.L[3]*work.v[18]; work.v[2] -= work.L[2]*work.v[17]; work.v[1] -= work.L[1]*work.v[16]; work.v[0] -= work.L[0]*work.v[15]; /* Unpermute the result, from v to var. */ var[0] = work.v[30]; var[1] = work.v[31]; var[2] = work.v[32]; var[3] = work.v[33]; var[4] = work.v[34]; var[5] = work.v[35]; var[6] = work.v[36]; var[7] = work.v[37]; var[8] = work.v[38]; var[9] = work.v[39]; var[10] = work.v[40]; var[11] = work.v[41]; var[12] = work.v[42]; var[13] = work.v[43]; var[14] = work.v[44]; var[15] = work.v[0]; var[16] = work.v[1]; var[17] = work.v[2]; var[18] = work.v[3]; var[19] = work.v[4]; var[20] = work.v[5]; var[21] = work.v[6]; var[22] = work.v[7]; var[23] = work.v[8]; var[24] = work.v[9]; var[25] = work.v[10]; var[26] = work.v[11]; var[27] = work.v[12]; var[28] = work.v[13]; var[29] = work.v[14]; var[30] = work.v[15]; var[31] = work.v[16]; var[32] = work.v[17]; var[33] = work.v[18]; var[34] = work.v[19]; var[35] = work.v[20]; var[36] = work.v[21]; var[37] = work.v[22]; var[38] = work.v[23]; var[39] = work.v[24]; var[40] = work.v[25]; var[41] = work.v[26]; var[42] = work.v[27]; var[43] = work.v[28]; var[44] = work.v[29]; var[45] = work.v[45]; #ifndef ZERO_LIBRARY_MODE if (settings.debug) { printf("Squared norm for solution is %.8g.\n", check_residual(target, var)); } #endif } __device__ void ldl_factor(void) { work.d[0] = work.KKT[0]; if (work.d[0] < 0) work.d[0] = settings.kkt_reg; else work.d[0] += settings.kkt_reg; work.d_inv[0] = 1/work.d[0]; work.L[0] = work.KKT[1]*work.d_inv[0]; work.v[1] = work.KKT[2]; work.d[1] = work.v[1]; if (work.d[1] < 0) work.d[1] = settings.kkt_reg; else work.d[1] += settings.kkt_reg; work.d_inv[1] = 1/work.d[1]; work.L[1] = (work.KKT[3])*work.d_inv[1]; work.v[2] = work.KKT[4]; work.d[2] = work.v[2]; if (work.d[2] < 0) work.d[2] = settings.kkt_reg; else work.d[2] += settings.kkt_reg; work.d_inv[2] = 1/work.d[2]; work.L[2] = (work.KKT[5])*work.d_inv[2]; work.v[3] = work.KKT[6]; work.d[3] = 
work.v[3]; if (work.d[3] < 0) work.d[3] = settings.kkt_reg; else work.d[3] += settings.kkt_reg; work.d_inv[3] = 1/work.d[3]; work.L[3] = (work.KKT[7])*work.d_inv[3]; work.v[4] = work.KKT[8]; work.d[4] = work.v[4]; if (work.d[4] < 0) work.d[4] = settings.kkt_reg; else work.d[4] += settings.kkt_reg; work.d_inv[4] = 1/work.d[4]; work.L[4] = (work.KKT[9])*work.d_inv[4]; work.v[5] = work.KKT[10]; work.d[5] = work.v[5]; if (work.d[5] < 0) work.d[5] = settings.kkt_reg; else work.d[5] += settings.kkt_reg; work.d_inv[5] = 1/work.d[5]; work.L[5] = (work.KKT[11])*work.d_inv[5]; work.v[6] = work.KKT[12]; work.d[6] = work.v[6]; if (work.d[6] < 0) work.d[6] = settings.kkt_reg; else work.d[6] += settings.kkt_reg; work.d_inv[6] = 1/work.d[6]; work.L[6] = (work.KKT[13])*work.d_inv[6]; work.v[7] = work.KKT[14]; work.d[7] = work.v[7]; if (work.d[7] < 0) work.d[7] = settings.kkt_reg; else work.d[7] += settings.kkt_reg; work.d_inv[7] = 1/work.d[7]; work.L[7] = (work.KKT[15])*work.d_inv[7]; work.v[8] = work.KKT[16]; work.d[8] = work.v[8]; if (work.d[8] < 0) work.d[8] = settings.kkt_reg; else work.d[8] += settings.kkt_reg; work.d_inv[8] = 1/work.d[8]; work.L[8] = (work.KKT[17])*work.d_inv[8]; work.v[9] = work.KKT[18]; work.d[9] = work.v[9]; if (work.d[9] < 0) work.d[9] = settings.kkt_reg; else work.d[9] += settings.kkt_reg; work.d_inv[9] = 1/work.d[9]; work.L[9] = (work.KKT[19])*work.d_inv[9]; work.v[10] = work.KKT[20]; work.d[10] = work.v[10]; if (work.d[10] < 0) work.d[10] = settings.kkt_reg; else work.d[10] += settings.kkt_reg; work.d_inv[10] = 1/work.d[10]; work.L[10] = (work.KKT[21])*work.d_inv[10]; work.v[11] = work.KKT[22]; work.d[11] = work.v[11]; if (work.d[11] < 0) work.d[11] = settings.kkt_reg; else work.d[11] += settings.kkt_reg; work.d_inv[11] = 1/work.d[11]; work.L[11] = (work.KKT[23])*work.d_inv[11]; work.v[12] = work.KKT[24]; work.d[12] = work.v[12]; if (work.d[12] < 0) work.d[12] = settings.kkt_reg; else work.d[12] += settings.kkt_reg; work.d_inv[12] = 1/work.d[12]; work.L[12] = (work.KKT[25])*work.d_inv[12]; work.v[13] = work.KKT[26]; work.d[13] = work.v[13]; if (work.d[13] < 0) work.d[13] = settings.kkt_reg; else work.d[13] += settings.kkt_reg; work.d_inv[13] = 1/work.d[13]; work.L[13] = (work.KKT[27])*work.d_inv[13]; work.v[14] = work.KKT[28]; work.d[14] = work.v[14]; if (work.d[14] < 0) work.d[14] = settings.kkt_reg; else work.d[14] += settings.kkt_reg; work.d_inv[14] = 1/work.d[14]; work.L[14] = (work.KKT[29])*work.d_inv[14]; work.v[0] = work.L[0]*work.d[0]; work.v[15] = work.KKT[30]-work.L[0]*work.v[0]; work.d[15] = work.v[15]; if (work.d[15] > 0) work.d[15] = -settings.kkt_reg; else work.d[15] -= settings.kkt_reg; work.d_inv[15] = 1/work.d[15]; work.L[15] = (work.KKT[31])*work.d_inv[15]; work.v[1] = work.L[1]*work.d[1]; work.v[16] = work.KKT[32]-work.L[1]*work.v[1]; work.d[16] = work.v[16]; if (work.d[16] > 0) work.d[16] = -settings.kkt_reg; else work.d[16] -= settings.kkt_reg; work.d_inv[16] = 1/work.d[16]; work.L[16] = (work.KKT[33])*work.d_inv[16]; work.v[2] = work.L[2]*work.d[2]; work.v[17] = work.KKT[34]-work.L[2]*work.v[2]; work.d[17] = work.v[17]; if (work.d[17] > 0) work.d[17] = -settings.kkt_reg; else work.d[17] -= settings.kkt_reg; work.d_inv[17] = 1/work.d[17]; work.L[17] = (work.KKT[35])*work.d_inv[17]; work.v[3] = work.L[3]*work.d[3]; work.v[18] = work.KKT[36]-work.L[3]*work.v[3]; work.d[18] = work.v[18]; if (work.d[18] > 0) work.d[18] = -settings.kkt_reg; else work.d[18] -= settings.kkt_reg; work.d_inv[18] = 1/work.d[18]; work.L[18] = (work.KKT[37])*work.d_inv[18]; work.v[4] 
= work.L[4]*work.d[4]; work.v[19] = work.KKT[38]-work.L[4]*work.v[4]; work.d[19] = work.v[19]; if (work.d[19] > 0) work.d[19] = -settings.kkt_reg; else work.d[19] -= settings.kkt_reg; work.d_inv[19] = 1/work.d[19]; work.L[19] = (work.KKT[39])*work.d_inv[19]; work.v[5] = work.L[5]*work.d[5]; work.v[20] = work.KKT[40]-work.L[5]*work.v[5]; work.d[20] = work.v[20]; if (work.d[20] > 0) work.d[20] = -settings.kkt_reg; else work.d[20] -= settings.kkt_reg; work.d_inv[20] = 1/work.d[20]; work.L[20] = (work.KKT[41])*work.d_inv[20]; work.v[6] = work.L[6]*work.d[6]; work.v[21] = work.KKT[42]-work.L[6]*work.v[6]; work.d[21] = work.v[21]; if (work.d[21] > 0) work.d[21] = -settings.kkt_reg; else work.d[21] -= settings.kkt_reg; work.d_inv[21] = 1/work.d[21]; work.L[21] = (work.KKT[43])*work.d_inv[21]; work.v[7] = work.L[7]*work.d[7]; work.v[22] = work.KKT[44]-work.L[7]*work.v[7]; work.d[22] = work.v[22]; if (work.d[22] > 0) work.d[22] = -settings.kkt_reg; else work.d[22] -= settings.kkt_reg; work.d_inv[22] = 1/work.d[22]; work.L[22] = (work.KKT[45])*work.d_inv[22]; work.v[8] = work.L[8]*work.d[8]; work.v[23] = work.KKT[46]-work.L[8]*work.v[8]; work.d[23] = work.v[23]; if (work.d[23] > 0) work.d[23] = -settings.kkt_reg; else work.d[23] -= settings.kkt_reg; work.d_inv[23] = 1/work.d[23]; work.L[23] = (work.KKT[47])*work.d_inv[23]; work.v[9] = work.L[9]*work.d[9]; work.v[24] = work.KKT[48]-work.L[9]*work.v[9]; work.d[24] = work.v[24]; if (work.d[24] > 0) work.d[24] = -settings.kkt_reg; else work.d[24] -= settings.kkt_reg; work.d_inv[24] = 1/work.d[24]; work.L[24] = (work.KKT[49])*work.d_inv[24]; work.v[10] = work.L[10]*work.d[10]; work.v[25] = work.KKT[50]-work.L[10]*work.v[10]; work.d[25] = work.v[25]; if (work.d[25] > 0) work.d[25] = -settings.kkt_reg; else work.d[25] -= settings.kkt_reg; work.d_inv[25] = 1/work.d[25]; work.L[25] = (work.KKT[51])*work.d_inv[25]; work.v[11] = work.L[11]*work.d[11]; work.v[26] = work.KKT[52]-work.L[11]*work.v[11]; work.d[26] = work.v[26]; if (work.d[26] > 0) work.d[26] = -settings.kkt_reg; else work.d[26] -= settings.kkt_reg; work.d_inv[26] = 1/work.d[26]; work.L[26] = (work.KKT[53])*work.d_inv[26]; work.v[12] = work.L[12]*work.d[12]; work.v[27] = work.KKT[54]-work.L[12]*work.v[12]; work.d[27] = work.v[27]; if (work.d[27] > 0) work.d[27] = -settings.kkt_reg; else work.d[27] -= settings.kkt_reg; work.d_inv[27] = 1/work.d[27]; work.L[27] = (work.KKT[55])*work.d_inv[27]; work.v[13] = work.L[13]*work.d[13]; work.v[28] = work.KKT[56]-work.L[13]*work.v[13]; work.d[28] = work.v[28]; if (work.d[28] > 0) work.d[28] = -settings.kkt_reg; else work.d[28] -= settings.kkt_reg; work.d_inv[28] = 1/work.d[28]; work.L[28] = (work.KKT[57])*work.d_inv[28]; work.v[14] = work.L[14]*work.d[14]; work.v[29] = work.KKT[58]-work.L[14]*work.v[14]; work.d[29] = work.v[29]; if (work.d[29] > 0) work.d[29] = -settings.kkt_reg; else work.d[29] -= settings.kkt_reg; work.d_inv[29] = 1/work.d[29]; work.L[29] = (work.KKT[59])*work.d_inv[29]; work.v[15] = work.L[15]*work.d[15]; work.v[30] = work.KKT[60]-work.L[15]*work.v[15]; work.d[30] = work.v[30]; if (work.d[30] < 0) work.d[30] = settings.kkt_reg; else work.d[30] += settings.kkt_reg; work.d_inv[30] = 1/work.d[30]; work.L[30] = (work.KKT[61])*work.d_inv[30]; work.v[16] = work.L[16]*work.d[16]; work.v[31] = work.KKT[62]-work.L[16]*work.v[16]; work.d[31] = work.v[31]; if (work.d[31] < 0) work.d[31] = settings.kkt_reg; else work.d[31] += settings.kkt_reg; work.d_inv[31] = 1/work.d[31]; work.L[31] = (work.KKT[63])*work.d_inv[31]; work.v[17] = 
work.L[17]*work.d[17]; work.v[32] = work.KKT[64]-work.L[17]*work.v[17]; work.d[32] = work.v[32]; if (work.d[32] < 0) work.d[32] = settings.kkt_reg; else work.d[32] += settings.kkt_reg; work.d_inv[32] = 1/work.d[32]; work.L[32] = (work.KKT[65])*work.d_inv[32]; work.v[18] = work.L[18]*work.d[18]; work.v[33] = work.KKT[66]-work.L[18]*work.v[18]; work.d[33] = work.v[33]; if (work.d[33] < 0) work.d[33] = settings.kkt_reg; else work.d[33] += settings.kkt_reg; work.d_inv[33] = 1/work.d[33]; work.L[33] = (work.KKT[67])*work.d_inv[33]; work.v[19] = work.L[19]*work.d[19]; work.v[34] = work.KKT[68]-work.L[19]*work.v[19]; work.d[34] = work.v[34]; if (work.d[34] < 0) work.d[34] = settings.kkt_reg; else work.d[34] += settings.kkt_reg; work.d_inv[34] = 1/work.d[34]; work.L[34] = (work.KKT[69])*work.d_inv[34]; work.v[20] = work.L[20]*work.d[20]; work.v[35] = work.KKT[70]-work.L[20]*work.v[20]; work.d[35] = work.v[35]; if (work.d[35] < 0) work.d[35] = settings.kkt_reg; else work.d[35] += settings.kkt_reg; work.d_inv[35] = 1/work.d[35]; work.L[35] = (work.KKT[71])*work.d_inv[35]; work.v[21] = work.L[21]*work.d[21]; work.v[36] = work.KKT[72]-work.L[21]*work.v[21]; work.d[36] = work.v[36]; if (work.d[36] < 0) work.d[36] = settings.kkt_reg; else work.d[36] += settings.kkt_reg; work.d_inv[36] = 1/work.d[36]; work.L[36] = (work.KKT[73])*work.d_inv[36]; work.v[22] = work.L[22]*work.d[22]; work.v[37] = work.KKT[74]-work.L[22]*work.v[22]; work.d[37] = work.v[37]; if (work.d[37] < 0) work.d[37] = settings.kkt_reg; else work.d[37] += settings.kkt_reg; work.d_inv[37] = 1/work.d[37]; work.L[37] = (work.KKT[75])*work.d_inv[37]; work.v[23] = work.L[23]*work.d[23]; work.v[38] = work.KKT[76]-work.L[23]*work.v[23]; work.d[38] = work.v[38]; if (work.d[38] < 0) work.d[38] = settings.kkt_reg; else work.d[38] += settings.kkt_reg; work.d_inv[38] = 1/work.d[38]; work.L[38] = (work.KKT[77])*work.d_inv[38]; work.v[24] = work.L[24]*work.d[24]; work.v[39] = work.KKT[78]-work.L[24]*work.v[24]; work.d[39] = work.v[39]; if (work.d[39] < 0) work.d[39] = settings.kkt_reg; else work.d[39] += settings.kkt_reg; work.d_inv[39] = 1/work.d[39]; work.L[39] = (work.KKT[79])*work.d_inv[39]; work.v[25] = work.L[25]*work.d[25]; work.v[40] = work.KKT[80]-work.L[25]*work.v[25]; work.d[40] = work.v[40]; if (work.d[40] < 0) work.d[40] = settings.kkt_reg; else work.d[40] += settings.kkt_reg; work.d_inv[40] = 1/work.d[40]; work.L[40] = (work.KKT[81])*work.d_inv[40]; work.v[26] = work.L[26]*work.d[26]; work.v[41] = work.KKT[82]-work.L[26]*work.v[26]; work.d[41] = work.v[41]; if (work.d[41] < 0) work.d[41] = settings.kkt_reg; else work.d[41] += settings.kkt_reg; work.d_inv[41] = 1/work.d[41]; work.L[41] = (work.KKT[83])*work.d_inv[41]; work.v[27] = work.L[27]*work.d[27]; work.v[42] = work.KKT[84]-work.L[27]*work.v[27]; work.d[42] = work.v[42]; if (work.d[42] < 0) work.d[42] = settings.kkt_reg; else work.d[42] += settings.kkt_reg; work.d_inv[42] = 1/work.d[42]; work.L[42] = (work.KKT[85])*work.d_inv[42]; work.v[28] = work.L[28]*work.d[28]; work.v[43] = work.KKT[86]-work.L[28]*work.v[28]; work.d[43] = work.v[43]; if (work.d[43] < 0) work.d[43] = settings.kkt_reg; else work.d[43] += settings.kkt_reg; work.d_inv[43] = 1/work.d[43]; work.L[43] = (work.KKT[87])*work.d_inv[43]; work.v[29] = work.L[29]*work.d[29]; work.v[44] = work.KKT[88]-work.L[29]*work.v[29]; work.d[44] = work.v[44]; if (work.d[44] < 0) work.d[44] = settings.kkt_reg; else work.d[44] += settings.kkt_reg; work.d_inv[44] = 1/work.d[44]; work.L[44] = (work.KKT[89])*work.d_inv[44]; work.v[30] = 
work.L[30]*work.d[30]; work.v[31] = work.L[31]*work.d[31]; work.v[32] = work.L[32]*work.d[32]; work.v[33] = work.L[33]*work.d[33]; work.v[34] = work.L[34]*work.d[34]; work.v[35] = work.L[35]*work.d[35]; work.v[36] = work.L[36]*work.d[36]; work.v[37] = work.L[37]*work.d[37]; work.v[38] = work.L[38]*work.d[38]; work.v[39] = work.L[39]*work.d[39]; work.v[40] = work.L[40]*work.d[40]; work.v[41] = work.L[41]*work.d[41]; work.v[42] = work.L[42]*work.d[42]; work.v[43] = work.L[43]*work.d[43]; work.v[44] = work.L[44]*work.d[44]; work.v[45] = 0-work.L[30]*work.v[30]-work.L[31]*work.v[31]-work.L[32]*work.v[32]-work.L[33]*work.v[33]-work.L[34]*work.v[34]-work.L[35]*work.v[35]-work.L[36]*work.v[36]-work.L[37]*work.v[37]-work.L[38]*work.v[38]-work.L[39]*work.v[39]-work.L[40]*work.v[40]-work.L[41]*work.v[41]-work.L[42]*work.v[42]-work.L[43]*work.v[43]-work.L[44]*work.v[44]; work.d[45] = work.v[45]; if (work.d[45] > 0) work.d[45] = -settings.kkt_reg; else work.d[45] -= settings.kkt_reg; work.d_inv[45] = 1/work.d[45]; #ifndef ZERO_LIBRARY_MODE if (settings.debug) { printf("Squared Frobenius for factorization is %.8g.\n", check_factorization()); } #endif } __device__ double check_factorization(void) { /* Returns the squared Frobenius norm of A - L*D*L'. */ double temp, residual; /* Only check the lower triangle. */ residual = 0; temp = work.KKT[60]-1*work.d[30]*1-work.L[15]*work.d[15]*work.L[15]; residual += temp*temp; temp = work.KKT[62]-1*work.d[31]*1-work.L[16]*work.d[16]*work.L[16]; residual += temp*temp; temp = work.KKT[64]-1*work.d[32]*1-work.L[17]*work.d[17]*work.L[17]; residual += temp*temp; temp = work.KKT[66]-1*work.d[33]*1-work.L[18]*work.d[18]*work.L[18]; residual += temp*temp; temp = work.KKT[68]-1*work.d[34]*1-work.L[19]*work.d[19]*work.L[19]; residual += temp*temp; temp = work.KKT[70]-1*work.d[35]*1-work.L[20]*work.d[20]*work.L[20]; residual += temp*temp; temp = work.KKT[72]-1*work.d[36]*1-work.L[21]*work.d[21]*work.L[21]; residual += temp*temp; temp = work.KKT[74]-1*work.d[37]*1-work.L[22]*work.d[22]*work.L[22]; residual += temp*temp; temp = work.KKT[76]-1*work.d[38]*1-work.L[23]*work.d[23]*work.L[23]; residual += temp*temp; temp = work.KKT[78]-1*work.d[39]*1-work.L[24]*work.d[24]*work.L[24]; residual += temp*temp; temp = work.KKT[80]-1*work.d[40]*1-work.L[25]*work.d[25]*work.L[25]; residual += temp*temp; temp = work.KKT[82]-1*work.d[41]*1-work.L[26]*work.d[26]*work.L[26]; residual += temp*temp; temp = work.KKT[84]-1*work.d[42]*1-work.L[27]*work.d[27]*work.L[27]; residual += temp*temp; temp = work.KKT[86]-1*work.d[43]*1-work.L[28]*work.d[28]*work.L[28]; residual += temp*temp; temp = work.KKT[88]-1*work.d[44]*1-work.L[29]*work.d[29]*work.L[29]; residual += temp*temp; temp = work.KKT[0]-1*work.d[0]*1; residual += temp*temp; temp = work.KKT[2]-1*work.d[1]*1; residual += temp*temp; temp = work.KKT[4]-1*work.d[2]*1; residual += temp*temp; temp = work.KKT[6]-1*work.d[3]*1; residual += temp*temp; temp = work.KKT[8]-1*work.d[4]*1; residual += temp*temp; temp = work.KKT[10]-1*work.d[5]*1; residual += temp*temp; temp = work.KKT[12]-1*work.d[6]*1; residual += temp*temp; temp = work.KKT[14]-1*work.d[7]*1; residual += temp*temp; temp = work.KKT[16]-1*work.d[8]*1; residual += temp*temp; temp = work.KKT[18]-1*work.d[9]*1; residual += temp*temp; temp = work.KKT[20]-1*work.d[10]*1; residual += temp*temp; temp = work.KKT[22]-1*work.d[11]*1; residual += temp*temp; temp = work.KKT[24]-1*work.d[12]*1; residual += temp*temp; temp = work.KKT[26]-1*work.d[13]*1; residual += temp*temp; temp = 
work.KKT[28]-1*work.d[14]*1; residual += temp*temp; temp = work.KKT[1]-work.L[0]*work.d[0]*1; residual += temp*temp; temp = work.KKT[3]-work.L[1]*work.d[1]*1; residual += temp*temp; temp = work.KKT[5]-work.L[2]*work.d[2]*1; residual += temp*temp; temp = work.KKT[7]-work.L[3]*work.d[3]*1; residual += temp*temp; temp = work.KKT[9]-work.L[4]*work.d[4]*1; residual += temp*temp; temp = work.KKT[11]-work.L[5]*work.d[5]*1; residual += temp*temp; temp = work.KKT[13]-work.L[6]*work.d[6]*1; residual += temp*temp; temp = work.KKT[15]-work.L[7]*work.d[7]*1; residual += temp*temp; temp = work.KKT[17]-work.L[8]*work.d[8]*1; residual += temp*temp; temp = work.KKT[19]-work.L[9]*work.d[9]*1; residual += temp*temp; temp = work.KKT[21]-work.L[10]*work.d[10]*1; residual += temp*temp; temp = work.KKT[23]-work.L[11]*work.d[11]*1; residual += temp*temp; temp = work.KKT[25]-work.L[12]*work.d[12]*1; residual += temp*temp; temp = work.KKT[27]-work.L[13]*work.d[13]*1; residual += temp*temp; temp = work.KKT[29]-work.L[14]*work.d[14]*1; residual += temp*temp; temp = work.KKT[30]-work.L[0]*work.d[0]*work.L[0]-1*work.d[15]*1; residual += temp*temp; temp = work.KKT[32]-work.L[1]*work.d[1]*work.L[1]-1*work.d[16]*1; residual += temp*temp; temp = work.KKT[34]-work.L[2]*work.d[2]*work.L[2]-1*work.d[17]*1; residual += temp*temp; temp = work.KKT[36]-work.L[3]*work.d[3]*work.L[3]-1*work.d[18]*1; residual += temp*temp; temp = work.KKT[38]-work.L[4]*work.d[4]*work.L[4]-1*work.d[19]*1; residual += temp*temp; temp = work.KKT[40]-work.L[5]*work.d[5]*work.L[5]-1*work.d[20]*1; residual += temp*temp; temp = work.KKT[42]-work.L[6]*work.d[6]*work.L[6]-1*work.d[21]*1; residual += temp*temp; temp = work.KKT[44]-work.L[7]*work.d[7]*work.L[7]-1*work.d[22]*1; residual += temp*temp; temp = work.KKT[46]-work.L[8]*work.d[8]*work.L[8]-1*work.d[23]*1; residual += temp*temp; temp = work.KKT[48]-work.L[9]*work.d[9]*work.L[9]-1*work.d[24]*1; residual += temp*temp; temp = work.KKT[50]-work.L[10]*work.d[10]*work.L[10]-1*work.d[25]*1; residual += temp*temp; temp = work.KKT[52]-work.L[11]*work.d[11]*work.L[11]-1*work.d[26]*1; residual += temp*temp; temp = work.KKT[54]-work.L[12]*work.d[12]*work.L[12]-1*work.d[27]*1; residual += temp*temp; temp = work.KKT[56]-work.L[13]*work.d[13]*work.L[13]-1*work.d[28]*1; residual += temp*temp; temp = work.KKT[58]-work.L[14]*work.d[14]*work.L[14]-1*work.d[29]*1; residual += temp*temp; temp = work.KKT[31]-1*work.d[15]*work.L[15]; residual += temp*temp; temp = work.KKT[33]-1*work.d[16]*work.L[16]; residual += temp*temp; temp = work.KKT[35]-1*work.d[17]*work.L[17]; residual += temp*temp; temp = work.KKT[37]-1*work.d[18]*work.L[18]; residual += temp*temp; temp = work.KKT[39]-1*work.d[19]*work.L[19]; residual += temp*temp; temp = work.KKT[41]-1*work.d[20]*work.L[20]; residual += temp*temp; temp = work.KKT[43]-1*work.d[21]*work.L[21]; residual += temp*temp; temp = work.KKT[45]-1*work.d[22]*work.L[22]; residual += temp*temp; temp = work.KKT[47]-1*work.d[23]*work.L[23]; residual += temp*temp; temp = work.KKT[49]-1*work.d[24]*work.L[24]; residual += temp*temp; temp = work.KKT[51]-1*work.d[25]*work.L[25]; residual += temp*temp; temp = work.KKT[53]-1*work.d[26]*work.L[26]; residual += temp*temp; temp = work.KKT[55]-1*work.d[27]*work.L[27]; residual += temp*temp; temp = work.KKT[57]-1*work.d[28]*work.L[28]; residual += temp*temp; temp = work.KKT[59]-1*work.d[29]*work.L[29]; residual += temp*temp; temp = work.KKT[61]-work.L[30]*work.d[30]*1; residual += temp*temp; temp = work.KKT[63]-work.L[31]*work.d[31]*1; residual += temp*temp; 
temp = work.KKT[65]-work.L[32]*work.d[32]*1; residual += temp*temp; temp = work.KKT[67]-work.L[33]*work.d[33]*1; residual += temp*temp; temp = work.KKT[69]-work.L[34]*work.d[34]*1; residual += temp*temp; temp = work.KKT[71]-work.L[35]*work.d[35]*1; residual += temp*temp; temp = work.KKT[73]-work.L[36]*work.d[36]*1; residual += temp*temp; temp = work.KKT[75]-work.L[37]*work.d[37]*1; residual += temp*temp; temp = work.KKT[77]-work.L[38]*work.d[38]*1; residual += temp*temp; temp = work.KKT[79]-work.L[39]*work.d[39]*1; residual += temp*temp; temp = work.KKT[81]-work.L[40]*work.d[40]*1; residual += temp*temp; temp = work.KKT[83]-work.L[41]*work.d[41]*1; residual += temp*temp; temp = work.KKT[85]-work.L[42]*work.d[42]*1; residual += temp*temp; temp = work.KKT[87]-work.L[43]*work.d[43]*1; residual += temp*temp; temp = work.KKT[89]-work.L[44]*work.d[44]*1; residual += temp*temp; return residual; } __device__ void matrix_multiply(double *result, double *source) { /* Finds result = A*source. */ result[0] = work.KKT[60]*source[0]+work.KKT[31]*source[30]+work.KKT[61]*source[45]; result[1] = work.KKT[62]*source[1]+work.KKT[33]*source[31]+work.KKT[63]*source[45]; result[2] = work.KKT[64]*source[2]+work.KKT[35]*source[32]+work.KKT[65]*source[45]; result[3] = work.KKT[66]*source[3]+work.KKT[37]*source[33]+work.KKT[67]*source[45]; result[4] = work.KKT[68]*source[4]+work.KKT[39]*source[34]+work.KKT[69]*source[45]; result[5] = work.KKT[70]*source[5]+work.KKT[41]*source[35]+work.KKT[71]*source[45]; result[6] = work.KKT[72]*source[6]+work.KKT[43]*source[36]+work.KKT[73]*source[45]; result[7] = work.KKT[74]*source[7]+work.KKT[45]*source[37]+work.KKT[75]*source[45]; result[8] = work.KKT[76]*source[8]+work.KKT[47]*source[38]+work.KKT[77]*source[45]; result[9] = work.KKT[78]*source[9]+work.KKT[49]*source[39]+work.KKT[79]*source[45]; result[10] = work.KKT[80]*source[10]+work.KKT[51]*source[40]+work.KKT[81]*source[45]; result[11] = work.KKT[82]*source[11]+work.KKT[53]*source[41]+work.KKT[83]*source[45]; result[12] = work.KKT[84]*source[12]+work.KKT[55]*source[42]+work.KKT[85]*source[45]; result[13] = work.KKT[86]*source[13]+work.KKT[57]*source[43]+work.KKT[87]*source[45]; result[14] = work.KKT[88]*source[14]+work.KKT[59]*source[44]+work.KKT[89]*source[45]; result[15] = work.KKT[0]*source[15]+work.KKT[1]*source[30]; result[16] = work.KKT[2]*source[16]+work.KKT[3]*source[31]; result[17] = work.KKT[4]*source[17]+work.KKT[5]*source[32]; result[18] = work.KKT[6]*source[18]+work.KKT[7]*source[33]; result[19] = work.KKT[8]*source[19]+work.KKT[9]*source[34]; result[20] = work.KKT[10]*source[20]+work.KKT[11]*source[35]; result[21] = work.KKT[12]*source[21]+work.KKT[13]*source[36]; result[22] = work.KKT[14]*source[22]+work.KKT[15]*source[37]; result[23] = work.KKT[16]*source[23]+work.KKT[17]*source[38]; result[24] = work.KKT[18]*source[24]+work.KKT[19]*source[39]; result[25] = work.KKT[20]*source[25]+work.KKT[21]*source[40]; result[26] = work.KKT[22]*source[26]+work.KKT[23]*source[41]; result[27] = work.KKT[24]*source[27]+work.KKT[25]*source[42]; result[28] = work.KKT[26]*source[28]+work.KKT[27]*source[43]; result[29] = work.KKT[28]*source[29]+work.KKT[29]*source[44]; result[30] = work.KKT[1]*source[15]+work.KKT[30]*source[30]+work.KKT[31]*source[0]; result[31] = work.KKT[3]*source[16]+work.KKT[32]*source[31]+work.KKT[33]*source[1]; result[32] = work.KKT[5]*source[17]+work.KKT[34]*source[32]+work.KKT[35]*source[2]; result[33] = work.KKT[7]*source[18]+work.KKT[36]*source[33]+work.KKT[37]*source[3]; result[34] = 
work.KKT[9]*source[19]+work.KKT[38]*source[34]+work.KKT[39]*source[4]; result[35] = work.KKT[11]*source[20]+work.KKT[40]*source[35]+work.KKT[41]*source[5]; result[36] = work.KKT[13]*source[21]+work.KKT[42]*source[36]+work.KKT[43]*source[6]; result[37] = work.KKT[15]*source[22]+work.KKT[44]*source[37]+work.KKT[45]*source[7]; result[38] = work.KKT[17]*source[23]+work.KKT[46]*source[38]+work.KKT[47]*source[8]; result[39] = work.KKT[19]*source[24]+work.KKT[48]*source[39]+work.KKT[49]*source[9]; result[40] = work.KKT[21]*source[25]+work.KKT[50]*source[40]+work.KKT[51]*source[10]; result[41] = work.KKT[23]*source[26]+work.KKT[52]*source[41]+work.KKT[53]*source[11]; result[42] = work.KKT[25]*source[27]+work.KKT[54]*source[42]+work.KKT[55]*source[12]; result[43] = work.KKT[27]*source[28]+work.KKT[56]*source[43]+work.KKT[57]*source[13]; result[44] = work.KKT[29]*source[29]+work.KKT[58]*source[44]+work.KKT[59]*source[14]; result[45] = work.KKT[61]*source[0]+work.KKT[63]*source[1]+work.KKT[65]*source[2]+work.KKT[67]*source[3]+work.KKT[69]*source[4]+work.KKT[71]*source[5]+work.KKT[73]*source[6]+work.KKT[75]*source[7]+work.KKT[77]*source[8]+work.KKT[79]*source[9]+work.KKT[81]*source[10]+work.KKT[83]*source[11]+work.KKT[85]*source[12]+work.KKT[87]*source[13]+work.KKT[89]*source[14]; } __device__ double check_residual(double *target, double *multiplicand) { /* Returns the squared 2-norm of lhs - A*rhs. */ /* Reuses v to find the residual. */ int i; double residual; residual = 0; matrix_multiply(work.v, multiplicand); for (i = 0; i < 15; i++) { residual += (target[i] - work.v[i])*(target[i] - work.v[i]); } return residual; } __device__ void fill_KKT(void) { work.KKT[60] = 2; work.KKT[62] = 2; work.KKT[64] = 2; work.KKT[66] = 2; work.KKT[68] = 2; work.KKT[70] = 2; work.KKT[72] = 2; work.KKT[74] = 2; work.KKT[76] = 2; work.KKT[78] = 2; work.KKT[80] = 2; work.KKT[82] = 2; work.KKT[84] = 2; work.KKT[86] = 2; work.KKT[88] = 2; work.KKT[0] = work.s_inv_z[0]; work.KKT[2] = work.s_inv_z[1]; work.KKT[4] = work.s_inv_z[2]; work.KKT[6] = work.s_inv_z[3]; work.KKT[8] = work.s_inv_z[4]; work.KKT[10] = work.s_inv_z[5]; work.KKT[12] = work.s_inv_z[6]; work.KKT[14] = work.s_inv_z[7]; work.KKT[16] = work.s_inv_z[8]; work.KKT[18] = work.s_inv_z[9]; work.KKT[20] = work.s_inv_z[10]; work.KKT[22] = work.s_inv_z[11]; work.KKT[24] = work.s_inv_z[12]; work.KKT[26] = work.s_inv_z[13]; work.KKT[28] = work.s_inv_z[14]; work.KKT[1] = 1; work.KKT[3] = 1; work.KKT[5] = 1; work.KKT[7] = 1; work.KKT[9] = 1; work.KKT[11] = 1; work.KKT[13] = 1; work.KKT[15] = 1; work.KKT[17] = 1; work.KKT[19] = 1; work.KKT[21] = 1; work.KKT[23] = 1; work.KKT[25] = 1; work.KKT[27] = 1; work.KKT[29] = 1; work.KKT[30] = work.block_33[0]; work.KKT[32] = work.block_33[0]; work.KKT[34] = work.block_33[0]; work.KKT[36] = work.block_33[0]; work.KKT[38] = work.block_33[0]; work.KKT[40] = work.block_33[0]; work.KKT[42] = work.block_33[0]; work.KKT[44] = work.block_33[0]; work.KKT[46] = work.block_33[0]; work.KKT[48] = work.block_33[0]; work.KKT[50] = work.block_33[0]; work.KKT[52] = work.block_33[0]; work.KKT[54] = work.block_33[0]; work.KKT[56] = work.block_33[0]; work.KKT[58] = work.block_33[0]; work.KKT[31] = -1; work.KKT[33] = -1; work.KKT[35] = -1; work.KKT[37] = -1; work.KKT[39] = -1; work.KKT[41] = -1; work.KKT[43] = -1; work.KKT[45] = -1; work.KKT[47] = -1; work.KKT[49] = -1; work.KKT[51] = -1; work.KKT[53] = -1; work.KKT[55] = -1; work.KKT[57] = -1; work.KKT[59] = -1; work.KKT[61] = 1; work.KKT[63] = 1; work.KKT[65] = 1; work.KKT[67] = 1; work.KKT[69] = 1; 
work.KKT[71] = 1; work.KKT[73] = 1; work.KKT[75] = 1; work.KKT[77] = 1; work.KKT[79] = 1; work.KKT[81] = 1; work.KKT[83] = 1; work.KKT[85] = 1; work.KKT[87] = 1; work.KKT[89] = 1; } }; __device__ int getGlobalIdx_1D_1D(){ return blockIdx.x * blockDim.x + threadIdx.x; } __device__ int getGlobalIdx_2D_1D(){ int blockId = blockIdx.y * gridDim.x + blockIdx.x; int threadId = blockId * blockDim.x + threadIdx.x; return threadId; } __global__ void update_S( double* S, double* DataMatrix, double* Centroids, double ThresholdValue, int numRows, int numFeatures){ extern __shared__ double normBuffer[]; int i = blockIdx.x; int k = blockIdx.y; int f = threadIdx.x; int tid = threadIdx.x; int numCentroids = gridDim.y; // // if(i == 0 && k == 0 && f == 0){ // printf("i: %d, k: %d, f: %d\n", gridDim.x, gridDim.y, blockDim.x); // } normBuffer[f] = (DataMatrix[deref(i, f, numFeatures)] - Centroids[deref(k, f, numFeatures)]) * (DataMatrix[deref(i, f, numFeatures)] - Centroids[deref(k, f, numFeatures)]); __syncthreads(); //this syncthreads is essential //this section of code sums down our stored scalars * features into 1 feature of one centroid for(int step = 1; step < numFeatures; step*=2){ while( (tid+step < numFeatures) ){ int ndx = 2 * step * tid; if( ndx + step < numFeatures){ normBuffer[ndx] += normBuffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); normBuffer[0] = sqrt(normBuffer[0]); // if(i == 5 && k == 3 && f == 0){ // printf("i: %d, k: %d, normBuffer[%d] = %f\n", i, k, f, sqrt(normBuffer[f]) ); // } if(normBuffer[0] == 0){ //do nothing so S[i][k] } else if( normBuffer[0] > ThresholdValue){ S[deref(i,k,numCentroids)] = 0; } else{ S[deref(i,k,numCentroids)] = 1/normBuffer[0]; } } /*Loads a buffer with all our scalar values calulated from S and U and used to multiply with a given centroid K*/ __global__ void load_scalar_buffer(double* ScalarBuffer, double* S, double* U, int numRows, int numCentroids){ int k = blockIdx.x; int tid = threadIdx.x; for(int i = tid; i < numRows; i += blockDim.x){ ScalarBuffer[deref(k, i, numRows)] = S[deref(i, k, numCentroids)] * U[deref(i, k, numCentroids)]; } __syncthreads(); } __global__ void calculate_centroids(double* DataMatrix, double* V, double* ScalarBuffer, int numRows, int numFeatures){ extern __shared__ double localbuffer[]; int k = blockIdx.x; int f = blockIdx.y; int tid = threadIdx.x; // if(k == 0 && f == 0 && tid == 0){ // printf("k: %d, f: %d, i: %d\n", gridDim.x, gridDim.y, blockDim.x); // } //this shared memory buffer represents a stack of scalars * a feature for all rows in my data matrix // used as a storage for summing in the next step for(int i = tid; i < numRows; i += blockDim.x){ localbuffer[i] = ScalarBuffer[deref(k, i, numRows)] * DataMatrix[deref(i,f,numFeatures)]; } __syncthreads(); //this syncthreads is essential //this section of code sums down our stored scalars * features into 1 feature of one centroid for(int step = 1; step < numRows; step*=2){ while( (tid+step < numRows) ){ int ndx = 2 * step * tid; if( ndx + step < numRows){ localbuffer[ndx] += localbuffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); // if(k==2 && tid == 0){ // printf("V[%d][%d] = %f \n", k, f, localbuffer[0]); // } //store in global memory for later use V[deref(k,f,numFeatures)] = localbuffer[0]; } __global__ void find_centroids(double* DataMatrix, double* V, double* ScalarBuffer, int numRows, int numFeatures){ int gtid = getGlobalIdx_2D_1D(); int k = blockIdx.x; int i = threadIdx.x; int 
f = threadIdx.x; // if(k==3 && f < numFeatures){ // printf("V[%d][%d] = %f \n", k, f, V[deref(k,f,numRows)]); // } //Sum working for(int step = 1; step < numRows; step*=2){ while( (i+step < numRows) ){ int ndx = 2 * step * i; if(ndx+step < numRows){ ScalarBuffer[deref(k, ndx, numRows)] += ScalarBuffer[deref(k, ndx+step, numRows)]; } i += blockDim.x; } i = threadIdx.x; __syncthreads(); } __syncthreads(); //if(ScalarBuffer[deref(k, 0, numRows)] != 0.0){ double normalize = (1/ScalarBuffer[deref(k, 0, numRows)]); if(f < numFeatures){ V[deref(k, f, numFeatures)] *= normalize; } //} __syncthreads(); // if(k==3 && f < numFeatures){ // printf("V[%d][%d] = %f, ScalarBuffer: %f, Normalize: %f \n", k, f, V[deref(k,f,numFeatures)], ScalarBuffer[deref(k, 0, numRows)], normalize); // // } __syncthreads(); } __global__ void init_S(double* S, int numCols){ int i = blockIdx.x; int k = threadIdx.x; S[deref(i,k,numCols)] = 1.0; } __global__ void build_h_matrix(double* H, double* DataMatrix, double* S, double* Centroids){ int gtid = getGlobalIdx_2D_1D(); int k = blockIdx.x; int i = blockIdx.y; int f = threadIdx.x; int tid = threadIdx.x; int numCentroids = gridDim.x; int numFeatures = blockDim.x; __shared__ double buffer[50]; //unlikely to have more than 50 features //Get Square Subtracted vectors //part of the norm buffer[f] = (DataMatrix[deref(i,f,numFeatures)] - Centroids[deref(k,f,numFeatures)]) * (DataMatrix[deref(i,f,numFeatures)] - Centroids[deref(k,f,numFeatures)]) ; // if( i == 0 && k == 0 ){ // printf("%d, %d, Centroid[%d]: %f\n", i, k, f, Centroids[deref(k,f,numFeatures)]); // } __syncthreads(); for(int step = 1; step < numFeatures; step*=2){ while( (tid+step < numFeatures) ){ int ndx = 2 * step * tid; if( ndx + step < numFeatures){ buffer[ndx] += buffer[ndx+step]; } tid += blockDim.x; } tid = threadIdx.x; __syncthreads(); } __syncthreads(); // if( i == 0 && k == 0 && f == 0){ // printf("%d, %d, Buffer[%d]: %f\n", i, k, f, buffer[f]); // } // if( i == 0 && k == 0 && f == 0 ){ // printf("Summed buffer for S[%d][%d]: %f\n", i, k, S[deref(i,k,numCentroids)]); // } if(f == 0){ H[deref(i,k,numCentroids)] = buffer[0] * S[deref(i,k,numCentroids)]; } __syncthreads(); // if( i == 0 && k == 0 && f == 0 ){ // printf("Summed buffer for H[%d][%d]: %f\n", i, k, H[deref(i,k,numCentroids)]); // } } __global__ void update_membership_matrix( double* U_GPU, double* H, double RegParam, int numClusters, int numRows){ /* Get Specific Thread Assignment Data */ int tid = blockIdx.x; if(tid < numRows){ solver_scope solver(tid); solver.set_defaults(); // Set basic algorithm parameters. solver.setup_indexing(); // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // printf("H[%d] = %f;\n", i, H[deref(tid, i, numClusters)]); // } // load one line of the h matrix // calculate the h_tilde in here double multiplicand = (-1/(2*RegParam)); for(int i = 0; i < numClusters; i += blockDim.y){ solver.params.Hi[i] = H[deref(tid,i, numClusters)] * multiplicand; } // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // printf("params.Hi[%d] = %f;\n", i, solver.params.Hi[i]); // } // Solve our problem at high speed! solver.solve(); //use_solution(vars, params, tid); for(int i = 0; i < numClusters; i += blockDim.y){ U_GPU[deref(tid, i, numClusters)] = (double) solver.vars.Ui[i]; } __syncthreads(); // for(int i = 0; i < numClusters && tid == 0; i += blockDim.y){ // if(! isnan(U_GPU[deref(tid, i, numClusters)]) ) // printf("U_GPU[%d][%d] = %f\n", tid, i, U_GPU[deref(tid, i, numClusters)]); // } } }
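To make the intended execution shapes of the kernels above explicit, the following host-side driver is a minimal sketch, not taken from the original source: the grid/block dimensions are inferred from how each kernel indexes blockIdx and threadIdx, while the device-pointer names, the 256-thread block size used for the row-striding kernels, and the single-iteration structure are assumptions for illustration only.

// Hypothetical host-side driver for one clustering iteration (all d_* pointers are
// assumed to be device allocations of the sizes the kernels expect).
void fuzzy_cmeans_iteration(double* d_Data, double* d_Centroids, double* d_S,
                            double* d_U, double* d_H, double* d_V,
                            double* d_Scalar, int numRows, int numFeatures,
                            int numClusters, double threshold, double regParam)
{
    // S starts as all ones: one block per data row, one thread per cluster column.
    init_S<<<numRows, numClusters>>>(d_S, numClusters);

    // update_S: blockIdx.x = row, blockIdx.y = cluster, threadIdx.x = feature;
    // dynamic shared memory holds one squared difference per feature.
    dim3 sGrid(numRows, numClusters);
    update_S<<<sGrid, numFeatures, numFeatures * sizeof(double)>>>(
        d_S, d_Data, d_Centroids, threshold, numRows, numFeatures);

    // build_h_matrix: blockIdx.x = cluster, blockIdx.y = row, threadIdx.x = feature.
    dim3 hGrid(numClusters, numRows);
    build_h_matrix<<<hGrid, numFeatures>>>(d_H, d_Data, d_S, d_Centroids);

    // One CVXGEN solve per data row; the kernel strides its cluster loops by
    // blockDim.y, so a single thread per block visits every cluster.
    update_membership_matrix<<<numRows, 1>>>(d_U, d_H, regParam, numClusters, numRows);

    // Per-cluster weights S[i][k] * U[i][k], stored cluster-major for the reductions below.
    load_scalar_buffer<<<numClusters, 256>>>(d_Scalar, d_S, d_U, numRows, numClusters);

    // calculate_centroids: blockIdx.x = cluster, blockIdx.y = feature;
    // dynamic shared memory holds one weighted value per data row.
    dim3 vGrid(numClusters, numFeatures);
    calculate_centroids<<<vGrid, 256, numRows * sizeof(double)>>>(
        d_Data, d_V, d_Scalar, numRows, numFeatures);

    // find_centroids: one block per cluster; blockDim.x must cover numFeatures
    // so every feature of the centroid is divided by the summed weights.
    find_centroids<<<numClusters, numFeatures>>>(d_Data, d_V, d_Scalar, numRows, numFeatures);
}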
f93aabae4f9be871dd2bd9574305ba173c6c4194.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_y2; int xdim0_advec_mom_kernel_y2_h = -1; __constant__ int ydim0_advec_mom_kernel_y2; int ydim0_advec_mom_kernel_y2_h = -1; __constant__ int xdim1_advec_mom_kernel_y2; int xdim1_advec_mom_kernel_y2_h = -1; __constant__ int ydim1_advec_mom_kernel_y2; int ydim1_advec_mom_kernel_y2_h = -1; __constant__ int xdim2_advec_mom_kernel_y2; int xdim2_advec_mom_kernel_y2_h = -1; __constant__ int ydim2_advec_mom_kernel_y2; int ydim2_advec_mom_kernel_y2_h = -1; __constant__ int xdim3_advec_mom_kernel_y2; int xdim3_advec_mom_kernel_y2_h = -1; __constant__ int ydim3_advec_mom_kernel_y2; int ydim3_advec_mom_kernel_y2_h = -1; __constant__ int xdim4_advec_mom_kernel_y2; int xdim4_advec_mom_kernel_y2_h = -1; __constant__ int ydim4_advec_mom_kernel_y2; int ydim4_advec_mom_kernel_y2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_y2*(y)+xdim0_advec_mom_kernel_y2*ydim0_advec_mom_kernel_y2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_y2*(y)+xdim1_advec_mom_kernel_y2*ydim1_advec_mom_kernel_y2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_y2*(y)+xdim2_advec_mom_kernel_y2*ydim2_advec_mom_kernel_y2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_y2*(y)+xdim3_advec_mom_kernel_y2*ydim3_advec_mom_kernel_y2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel_y2*(y)+xdim4_advec_mom_kernel_y2*ydim4_advec_mom_kernel_y2*(z)) //user function __device__ inline void advec_mom_kernel_y2_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x,const double *vol_flux_y) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)] ; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_y2( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_y2 + idx_z * 1*1 * xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_y2 + idx_z * 1*1 * xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_y2 + idx_z * 1*1 * xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_y2 + idx_z * 1*1 * xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel_y2 + idx_z * 1*1 * xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_y2_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_y2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = 
desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_y2"); OPS_kernels[124].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_y2_h || ydim0 != ydim0_advec_mom_kernel_y2_h || xdim1 != xdim1_advec_mom_kernel_y2_h || ydim1 != ydim1_advec_mom_kernel_y2_h || xdim2 != xdim2_advec_mom_kernel_y2_h || ydim2 != ydim2_advec_mom_kernel_y2_h || xdim3 != xdim3_advec_mom_kernel_y2_h || ydim3 != ydim3_advec_mom_kernel_y2_h || xdim4 != xdim4_advec_mom_kernel_y2_h || ydim4 != ydim4_advec_mom_kernel_y2_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel_y2, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_y2_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel_y2, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_y2_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel_y2, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_y2_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel_y2, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_y2_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel_y2, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_y2_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel_y2, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_y2_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel_y2, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_y2_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel_y2, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_y2_h = ydim3; hipMemcpyToSymbol( xdim4_advec_mom_kernel_y2, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_y2_h = xdim4; hipMemcpyToSymbol( ydim4_advec_mom_kernel_y2, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_y2_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? 
args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_y2), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[124].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) 
+ 124; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_y2_execute; if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_y2"); } ops_enqueue_kernel(desc); } #endif
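The base0..base4 pointer setup in the host stub above repeats one formula per argument. A minimal standalone sketch of that byte-offset arithmetic (hypothetical helper name, not produced by ops.py):

// dat    : bytes per grid point for this argument (type_size or elem_size, per OPS_soa)
// size   : padded x/y extents of the dat
// stride : stencil strides per dimension
// start  : local start indices of the iteration range
static int ops_base_offset_sketch(int base_offset, int dat, const int *size,
                                  const int *stride, const int *start) {
  int base = base_offset;
  base += dat * 1 * (start[0] * stride[0]);                  // x: element offset within a row
  base += dat * size[0] * (start[1] * stride[1]);            // y: whole padded rows
  base += dat * size[0] * size[1] * (start[2] * stride[2]);  // z: whole padded planes
  return base;  // then p_a[i] = (char *)args[i].data_d + base
}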
f93aabae4f9be871dd2bd9574305ba173c6c4194.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_y2; int xdim0_advec_mom_kernel_y2_h = -1; __constant__ int ydim0_advec_mom_kernel_y2; int ydim0_advec_mom_kernel_y2_h = -1; __constant__ int xdim1_advec_mom_kernel_y2; int xdim1_advec_mom_kernel_y2_h = -1; __constant__ int ydim1_advec_mom_kernel_y2; int ydim1_advec_mom_kernel_y2_h = -1; __constant__ int xdim2_advec_mom_kernel_y2; int xdim2_advec_mom_kernel_y2_h = -1; __constant__ int ydim2_advec_mom_kernel_y2; int ydim2_advec_mom_kernel_y2_h = -1; __constant__ int xdim3_advec_mom_kernel_y2; int xdim3_advec_mom_kernel_y2_h = -1; __constant__ int ydim3_advec_mom_kernel_y2; int ydim3_advec_mom_kernel_y2_h = -1; __constant__ int xdim4_advec_mom_kernel_y2; int xdim4_advec_mom_kernel_y2_h = -1; __constant__ int ydim4_advec_mom_kernel_y2; int ydim4_advec_mom_kernel_y2_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_y2*(y)+xdim0_advec_mom_kernel_y2*ydim0_advec_mom_kernel_y2*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_y2*(y)+xdim1_advec_mom_kernel_y2*ydim1_advec_mom_kernel_y2*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_y2*(y)+xdim2_advec_mom_kernel_y2*ydim2_advec_mom_kernel_y2*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_y2*(y)+xdim3_advec_mom_kernel_y2*ydim3_advec_mom_kernel_y2*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_mom_kernel_y2*(y)+xdim4_advec_mom_kernel_y2*ydim4_advec_mom_kernel_y2*(z)) //user function __device__ inline void advec_mom_kernel_y2_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x,const double *vol_flux_y) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)] ; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_y[OPS_ACC4(0,1,0)] - vol_flux_y[OPS_ACC4(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_y2( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_y2 + idx_z * 1*1 * xdim0_advec_mom_kernel_y2 * ydim0_advec_mom_kernel_y2; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_y2 + idx_z * 1*1 * xdim1_advec_mom_kernel_y2 * ydim1_advec_mom_kernel_y2; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_y2 + idx_z * 1*1 * xdim2_advec_mom_kernel_y2 * ydim2_advec_mom_kernel_y2; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_y2 + idx_z * 1*1 * xdim3_advec_mom_kernel_y2 * ydim3_advec_mom_kernel_y2; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_mom_kernel_y2 + idx_z * 1*1 * xdim4_advec_mom_kernel_y2 * ydim4_advec_mom_kernel_y2; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_y2_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_y2_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = 
desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_y2"); OPS_kernels[124].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_y2_h || ydim0 != ydim0_advec_mom_kernel_y2_h || xdim1 != xdim1_advec_mom_kernel_y2_h || ydim1 != ydim1_advec_mom_kernel_y2_h || xdim2 != xdim2_advec_mom_kernel_y2_h || ydim2 != ydim2_advec_mom_kernel_y2_h || xdim3 != xdim3_advec_mom_kernel_y2_h || ydim3 != ydim3_advec_mom_kernel_y2_h || xdim4 != xdim4_advec_mom_kernel_y2_h || ydim4 != ydim4_advec_mom_kernel_y2_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel_y2, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_y2_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel_y2, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_y2_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel_y2, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_y2_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel_y2, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_y2_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel_y2, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_y2_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel_y2, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_y2_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel_y2, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_y2_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel_y2, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_y2_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_mom_kernel_y2, &xdim4, sizeof(int) ); xdim4_advec_mom_kernel_y2_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_mom_kernel_y2, &ydim4, sizeof(int) ); ydim4_advec_mom_kernel_y2_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_y2<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[124].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_y2(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 124; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash 
= ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_y2_execute; if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_y2"); } ops_enqueue_kernel(desc); } #endif
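Both the HIP and CUDA versions of advec_mom_kernel_y2 above index their fields through the OPS_ACC macros and the idx_x/idx_y/idx_z pointer advances. A minimal device-side sketch (illustrative only, not generated code) of the flat, x-fastest index they compute for a padded field of extents xdim by ydim:

__device__ __forceinline__ int ops_acc_sketch(int x, int y, int z, int xdim, int ydim) {
  // x varies fastest, then y (row pitch = xdim), then z (plane pitch = xdim * ydim);
  // e.g. vol_flux_x[OPS_ACC3(1,0,0)] reads the neighbour one point along +x.
  return x + xdim * y + xdim * ydim * z;
}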
5087e5262e95b3abbd46d170745ea2ff242596bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; #include "kernels/reduce.cuh" #include "pagerank_push_cuda.cuh" static const int __tb_PageRank = TB_SIZE; __global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_value[src] = 0; p_nout[src] = 0; p_residual[src] = 0; p_delta[src] = 0; } } // FP: "10 -> 11; } __global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, DynamicBitset& bitset_nout) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; uint32_t num_edges; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_residual[src] = local_alpha; num_edges = graph.getOutDegree(src); atomicTestAdd(&p_nout[src], num_edges); bitset_nout.set(src); } } // FP: "11 -> 12; } __global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; float residual_old; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { 
bool pop = src < __end; if (pop) { if (p_residual[src] > 0) { residual_old = p_residual[src]; p_residual[src] = 0; p_value[src] += residual_old; if (residual_old > local_tolerance) { if (p_nout[src] > 0) { p_delta[src] = residual_old * (1 - local_alpha) / p_nout[src]; } } } } } // FP: "15 -> 16; } __global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual, DynamicBitset& bitset_residual, HGAccumulator<unsigned int> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_PageRank; float _delta; __shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; // FP: "7 -> 8; DGAccumulator_accum.thread_entry(); // FP: "8 -> 9; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "9 -> 10; bool pop = src < __end; // FP: "10 -> 11; if (pop) { if (p_delta[src] > 0) { _delta = p_delta[src]; p_delta[src] = 0; DGAccumulator_accum.reduce( 1); } else { pop = false; } } // FP: "17 -> 18; // FP: "20 -> 21; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "21 -> 22; __shared__ struct { float _delta; } _np_closure [TB_SIZE]; // FP: "22 -> 23; _np_closure[threadIdx.x]._delta = _delta; // FP: "23 -> 24; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "26 -> 27; // FP: "27 -> 28; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "28 -> 29; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "29 -> 30; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "32 -> 33; __syncthreads(); // FP: "33 -> 34; while (true) { // FP: "34 -> 35; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "39 -> 40; __syncthreads(); // FP: "40 -> 41; break; } // FP: "42 -> 43; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "45 -> 46; __syncthreads(); // FP: "46 -> 47; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "47 -> 48; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "50 -> 51; assert(nps.tb.src < __kernel_tb_size); _delta = _np_closure[nps.tb.src]._delta; // FP: "51 -> 52; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type nbr; nbr = ns +_np_j; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } // FP: "59 -> 60; __syncthreads(); } // FP: "61 -> 62; // FP: "62 -> 63; { const int warpid = threadIdx.x / 32; // FP: "63 -> 64; const int _np_laneid = cub::LaneId(); // FP: "64 -> 65; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); _delta = _np_closure[nps.warp.src[warpid]]._delta; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type nbr; nbr = _np_w_start +_np_ii; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } } // FP: "82 -> 83; __syncthreads(); // FP: "83 -> 84; } // FP: "84 -> 85; __syncthreads(); // FP: "85 -> 86; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "86 -> 87; while (_np.work()) { // FP: "87 -> 88; int _np_i =0; // FP: "88 -> 89; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "89 -> 90; __syncthreads(); // FP: "90 -> 91; // FP: "91 -> 92; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type nbr; assert(nps.fg.src[_np_i] < __kernel_tb_size); _delta = _np_closure[nps.fg.src[_np_i]]._delta; nbr= nps.fg.itvalue[_np_i]; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } // FP: "100 -> 101; _np.execute_round_done(ITSIZE); // FP: "101 -> 102; __syncthreads(); } // FP: "103 -> 104; assert(threadIdx.x < __kernel_tb_size); _delta = _np_closure[threadIdx.x]._delta; } // FP: "106 -> 107; DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "107 -> 108; } __global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> 
max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts; __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_residual_over_tolerance.thread_entry(); // FP: "3 -> 4; // FP: "4 -> 5; DGAccumulator_sum.thread_entry(); // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_sum_residual.thread_entry(); // FP: "7 -> 8; // FP: "8 -> 9; max_residual.thread_entry(); // FP: "9 -> 10; // FP: "10 -> 11; max_value.thread_entry(); // FP: "11 -> 12; // FP: "12 -> 13; min_residual.thread_entry(); // FP: "13 -> 14; // FP: "14 -> 15; min_value.thread_entry(); // FP: "15 -> 16; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { max_value.reduce(p_value[src]); min_value.reduce(p_value[src]); max_residual.reduce(p_residual[src]); min_residual.reduce(p_residual[src]); DGAccumulator_sum.reduce( p_value[src]); DGAccumulator_sum.reduce( p_residual[src]); if (p_residual[src] > local_tolerance) { DGAccumulator_residual_over_tolerance.reduce( 1); } } } // FP: "29 -> 30; DGAccumulator_residual_over_tolerance.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts); // FP: "30 -> 31; DGAccumulator_sum.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts); // FP: "31 -> 32; DGAccumulator_sum_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts); // FP: "32 -> 33; max_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_residual_ts); // FP: "33 -> 34; max_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(max_value_ts); // FP: "34 -> 35; min_residual.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_residual_ts); // FP: "35 -> 36; min_value.thread_exit<hipcub::BlockReduce<float, TB_SIZE> >(min_value_ts); // FP: "36 -> 37; } void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( ResetGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void ResetGraph_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void ResetGraph_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void ResetGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const 
float & local_alpha, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr())); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx); // FP: "2 -> 3; } void InitializeGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx); // FP: "2 -> 3; } void InitializeGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx); // FP: "2 -> 3; } void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( PageRank_delta) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void PageRank_delta_allNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->gg.nnodes, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_delta_masterNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_delta_nodesWithEdges_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<unsigned int> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; hipLaunchKernelGGL(( PageRank) , dim3(blocks), dim3(__tb_PageRank), 0, 0, ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->residual.is_updated.gpu_rd_ptr()), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void PageRank_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRank_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; 
PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRank_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance; HGAccumulator<float> _DGAccumulator_sum; HGAccumulator<float> _DGAccumulator_sum_residual; HGReduceMax<float> _max_residual; HGReduceMax<float> _max_value; HGReduceMin<float> _min_residual; HGReduceMin<float> _min_value; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<uint64_t> DGAccumulator_residual_over_toleranceval = Shared<uint64_t>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr(); // FP: "8 -> 9; Shared<float> DGAccumulator_sumval = Shared<float>(1); // FP: "9 -> 10; // FP: "10 -> 11; *(DGAccumulator_sumval.cpu_wr_ptr()) = 0; // FP: "11 -> 12; _DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr(); // FP: "12 -> 13; Shared<float> DGAccumulator_sum_residualval = Shared<float>(1); // FP: "13 -> 14; // FP: "14 -> 15; *(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0; // FP: "15 -> 16; _DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr(); // FP: "16 -> 17; Shared<float> max_residualval = Shared<float>(1); // FP: "17 -> 18; // FP: "18 -> 19; *(max_residualval.cpu_wr_ptr()) = 0; // FP: "19 -> 20; _max_residual.rv = max_residualval.gpu_wr_ptr(); // FP: "20 -> 21; Shared<float> max_valueval = Shared<float>(1); // FP: "21 -> 22; // FP: "22 -> 23; *(max_valueval.cpu_wr_ptr()) = 0; // FP: "23 -> 24; _max_value.rv = max_valueval.gpu_wr_ptr(); // FP: "24 -> 25; Shared<float> min_residualval = Shared<float>(1); // FP: "25 -> 26; // FP: "26 -> 27; *(min_residualval.cpu_wr_ptr()) = 0; // FP: "27 -> 28; _min_residual.rv = min_residualval.gpu_wr_ptr(); // FP: "28 -> 29; Shared<float> min_valueval = Shared<float>(1); // FP: "29 -> 30; // FP: "30 -> 31; *(min_valueval.cpu_wr_ptr()) = 0; // FP: "31 -> 32; _min_value.rv = min_valueval.gpu_wr_ptr(); // FP: "32 -> 33; hipLaunchKernelGGL(( PageRankSanity) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value); // FP: "33 -> 34; check_cuda_kernel; // FP: "34 -> 35; DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr()); // FP: "35 -> 36; DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr()); // FP: "36 -> 37; DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr()); // FP: "37 -> 38; max_residual = *(max_residualval.cpu_rd_ptr()); // FP: "38 -> 39; max_value = *(max_valueval.cpu_rd_ptr()); // FP: "39 -> 40; min_residual = *(min_residualval.cpu_rd_ptr()); // FP: "40 -> 41; min_value = *(min_valueval.cpu_rd_ptr()); // FP: "41 -> 42; } 
void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; } void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; } void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; }
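kernel_sizing(CSRGraph &, dim3 &, dim3 &) is only declared in the file above; its definition lives elsewhere in the gg/ggcuda runtime. Purely as an assumption about what a compatible launch configuration looks like (not the library's actual implementation), the 1-D node-range kernels above need at most one thread per node, rounded up to whole blocks of TB_SIZE, and tolerate a capped block count because they iterate with a grid-stride loop:

#include <cuda_runtime.h>  // dim3, when compiled standalone
void kernel_sizing_sketch(unsigned int nnodes, dim3 &blocks, dim3 &threads) {
  const unsigned int tb = 256;                   // TB_SIZE in the file above
  threads = dim3(tb, 1, 1);
  blocks  = dim3((nnodes + tb - 1) / tb, 1, 1);  // may be capped; kernels advance by src += nthreads
}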
5087e5262e95b3abbd46d170745ea2ff242596bc.cu
/* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; #include "kernels/reduce.cuh" #include "pagerank_push_cuda.cuh" static const int __tb_PageRank = TB_SIZE; __global__ void ResetGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_value[src] = 0; p_nout[src] = 0; p_residual[src] = 0; p_delta[src] = 0; } } // FP: "10 -> 11; } __global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, uint32_t * p_nout, float * p_residual, DynamicBitset& bitset_nout) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; uint32_t num_edges; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_residual[src] = local_alpha; num_edges = graph.getOutDegree(src); atomicTestAdd(&p_nout[src], num_edges); bitset_nout.set(src); } } // FP: "11 -> 12; } __global__ void PageRank_delta(CSRGraph graph, unsigned int __begin, unsigned int __end, const float local_alpha, float local_tolerance, float * p_delta, uint32_t * p_nout, float * p_residual, float * p_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; float residual_old; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_residual[src] > 0) { residual_old = 
p_residual[src]; p_residual[src] = 0; p_value[src] += residual_old; if (residual_old > local_tolerance) { if (p_nout[src] > 0) { p_delta[src] = residual_old * (1 - local_alpha) / p_nout[src]; } } } } } // FP: "15 -> 16; } __global__ void PageRank(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_delta, float * p_residual, DynamicBitset& bitset_residual, HGAccumulator<unsigned int> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_PageRank; float _delta; __shared__ cub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; // FP: "7 -> 8; DGAccumulator_accum.thread_entry(); // FP: "8 -> 9; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "9 -> 10; bool pop = src < __end; // FP: "10 -> 11; if (pop) { if (p_delta[src] > 0) { _delta = p_delta[src]; p_delta[src] = 0; DGAccumulator_accum.reduce( 1); } else { pop = false; } } // FP: "17 -> 18; // FP: "20 -> 21; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "21 -> 22; __shared__ struct { float _delta; } _np_closure [TB_SIZE]; // FP: "22 -> 23; _np_closure[threadIdx.x]._delta = _delta; // FP: "23 -> 24; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "26 -> 27; // FP: "27 -> 28; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "28 -> 29; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "29 -> 30; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "32 -> 33; __syncthreads(); // FP: "33 -> 34; while (true) { // FP: "34 -> 35; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "39 -> 40; __syncthreads(); // FP: "40 -> 41; break; } // FP: "42 -> 43; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "45 -> 46; __syncthreads(); // FP: "46 -> 47; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "47 -> 48; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "50 -> 51; assert(nps.tb.src < __kernel_tb_size); _delta = _np_closure[nps.tb.src]._delta; // FP: "51 -> 52; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type nbr; nbr = ns +_np_j; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } // FP: "59 -> 60; __syncthreads(); } // FP: "61 -> 62; // FP: "62 -> 63; { const int warpid = threadIdx.x / 32; // FP: "63 -> 64; const int _np_laneid = cub::LaneId(); // FP: "64 -> 65; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); _delta = _np_closure[nps.warp.src[warpid]]._delta; for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type nbr; nbr = _np_w_start +_np_ii; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } } // FP: "82 -> 83; __syncthreads(); // FP: "83 -> 84; } // FP: "84 -> 85; __syncthreads(); // FP: "85 -> 86; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "86 -> 87; while (_np.work()) { // FP: "87 -> 88; int _np_i =0; // FP: "88 -> 89; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "89 -> 90; __syncthreads(); // FP: "90 -> 91; // FP: "91 -> 92; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type nbr; assert(nps.fg.src[_np_i] < __kernel_tb_size); _delta = _np_closure[nps.fg.src[_np_i]]._delta; nbr= nps.fg.itvalue[_np_i]; { index_type dst; dst = graph.getAbsDestination(nbr); atomicTestAdd(&p_residual[dst], _delta); bitset_residual.set(dst); } } // FP: "100 -> 101; _np.execute_round_done(ITSIZE); // FP: "101 -> 102; __syncthreads(); } // FP: "103 -> 104; assert(threadIdx.x < __kernel_tb_size); _delta = _np_closure[threadIdx.x]._delta; } // FP: "106 -> 107; DGAccumulator_accum.thread_exit<cub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "107 -> 108; } __global__ void PageRankSanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float local_tolerance, float * p_residual, float * p_value, HGAccumulator<uint64_t> DGAccumulator_residual_over_tolerance, HGAccumulator<float> DGAccumulator_sum, HGAccumulator<float> DGAccumulator_sum_residual, HGReduceMax<float> 
max_residual, HGReduceMax<float> max_value, HGReduceMin<float> min_residual, HGReduceMin<float> min_value) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ cub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_residual_over_tolerance_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_residual_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_residual_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage max_value_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_residual_ts; __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage min_value_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_residual_over_tolerance.thread_entry(); // FP: "3 -> 4; // FP: "4 -> 5; DGAccumulator_sum.thread_entry(); // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_sum_residual.thread_entry(); // FP: "7 -> 8; // FP: "8 -> 9; max_residual.thread_entry(); // FP: "9 -> 10; // FP: "10 -> 11; max_value.thread_entry(); // FP: "11 -> 12; // FP: "12 -> 13; min_residual.thread_entry(); // FP: "13 -> 14; // FP: "14 -> 15; min_value.thread_entry(); // FP: "15 -> 16; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { max_value.reduce(p_value[src]); min_value.reduce(p_value[src]); max_residual.reduce(p_residual[src]); min_residual.reduce(p_residual[src]); DGAccumulator_sum.reduce( p_value[src]); DGAccumulator_sum.reduce( p_residual[src]); if (p_residual[src] > local_tolerance) { DGAccumulator_residual_over_tolerance.reduce( 1); } } } // FP: "29 -> 30; DGAccumulator_residual_over_tolerance.thread_exit<cub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_residual_over_tolerance_ts); // FP: "30 -> 31; DGAccumulator_sum.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_ts); // FP: "31 -> 32; DGAccumulator_sum_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(DGAccumulator_sum_residual_ts); // FP: "32 -> 33; max_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_residual_ts); // FP: "33 -> 34; max_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(max_value_ts); // FP: "34 -> 35; min_residual.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_residual_ts); // FP: "35 -> 36; min_value.thread_exit<cub::BlockReduce<float, TB_SIZE> >(min_value_ts); // FP: "36 -> 37; } void ResetGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; ResetGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void ResetGraph_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void ResetGraph_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void ResetGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; ResetGraph_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // 
FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; InitializeGraph <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->nout.is_updated.gpu_rd_ptr())); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph_allNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->gg.nnodes, local_alpha, ctx); // FP: "2 -> 3; } void InitializeGraph_masterNodes_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, ctx); // FP: "2 -> 3; } void InitializeGraph_nodesWithEdges_cuda(const float & local_alpha, struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph_cuda(0, ctx->numNodesWithEdges, local_alpha, ctx); // FP: "2 -> 3; } void PageRank_delta_cuda(unsigned int __begin, unsigned int __end, const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; PageRank_delta <<<blocks, threads>>>(ctx->gg, __begin, __end, local_alpha, local_tolerance, ctx->delta.data.gpu_wr_ptr(), ctx->nout.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void PageRank_delta_allNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->gg.nnodes, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_delta_masterNodes_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_delta_nodesWithEdges_cuda(const float & local_alpha, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_delta_cuda(0, ctx->numNodesWithEdges, local_alpha, local_tolerance, ctx); // FP: "2 -> 3; } void PageRank_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<unsigned int> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; PageRank <<<blocks, __tb_PageRank>>>(ctx->gg, __begin, __end, ctx->delta.data.gpu_wr_ptr(), ctx->residual.data.gpu_wr_ptr(), *(ctx->residual.is_updated.gpu_rd_ptr()), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void PageRank_allNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRank_masterNodes_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRank_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRank_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 
-> 2; PageRank_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void PageRankSanity_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint64_t> _DGAccumulator_residual_over_tolerance; HGAccumulator<float> _DGAccumulator_sum; HGAccumulator<float> _DGAccumulator_sum_residual; HGReduceMax<float> _max_residual; HGReduceMax<float> _max_value; HGReduceMin<float> _min_residual; HGReduceMin<float> _min_value; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<uint64_t> DGAccumulator_residual_over_toleranceval = Shared<uint64_t>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_residual_over_toleranceval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_residual_over_tolerance.rv = DGAccumulator_residual_over_toleranceval.gpu_wr_ptr(); // FP: "8 -> 9; Shared<float> DGAccumulator_sumval = Shared<float>(1); // FP: "9 -> 10; // FP: "10 -> 11; *(DGAccumulator_sumval.cpu_wr_ptr()) = 0; // FP: "11 -> 12; _DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr(); // FP: "12 -> 13; Shared<float> DGAccumulator_sum_residualval = Shared<float>(1); // FP: "13 -> 14; // FP: "14 -> 15; *(DGAccumulator_sum_residualval.cpu_wr_ptr()) = 0; // FP: "15 -> 16; _DGAccumulator_sum_residual.rv = DGAccumulator_sum_residualval.gpu_wr_ptr(); // FP: "16 -> 17; Shared<float> max_residualval = Shared<float>(1); // FP: "17 -> 18; // FP: "18 -> 19; *(max_residualval.cpu_wr_ptr()) = 0; // FP: "19 -> 20; _max_residual.rv = max_residualval.gpu_wr_ptr(); // FP: "20 -> 21; Shared<float> max_valueval = Shared<float>(1); // FP: "21 -> 22; // FP: "22 -> 23; *(max_valueval.cpu_wr_ptr()) = 0; // FP: "23 -> 24; _max_value.rv = max_valueval.gpu_wr_ptr(); // FP: "24 -> 25; Shared<float> min_residualval = Shared<float>(1); // FP: "25 -> 26; // FP: "26 -> 27; *(min_residualval.cpu_wr_ptr()) = 0; // FP: "27 -> 28; _min_residual.rv = min_residualval.gpu_wr_ptr(); // FP: "28 -> 29; Shared<float> min_valueval = Shared<float>(1); // FP: "29 -> 30; // FP: "30 -> 31; *(min_valueval.cpu_wr_ptr()) = 0; // FP: "31 -> 32; _min_value.rv = min_valueval.gpu_wr_ptr(); // FP: "32 -> 33; PageRankSanity <<<blocks, threads>>>(ctx->gg, __begin, __end, local_tolerance, ctx->residual.data.gpu_wr_ptr(), ctx->value.data.gpu_wr_ptr(), _DGAccumulator_residual_over_tolerance, _DGAccumulator_sum, _DGAccumulator_sum_residual, _max_residual, _max_value, _min_residual, _min_value); // FP: "33 -> 34; check_cuda_kernel; // FP: "34 -> 35; DGAccumulator_residual_over_tolerance = *(DGAccumulator_residual_over_toleranceval.cpu_rd_ptr()); // FP: "35 -> 36; DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr()); // FP: "36 -> 37; DGAccumulator_sum_residual = *(DGAccumulator_sum_residualval.cpu_rd_ptr()); // FP: "37 -> 38; max_residual = *(max_residualval.cpu_rd_ptr()); // FP: "38 -> 39; max_value = *(max_valueval.cpu_rd_ptr()); // FP: "39 -> 40; min_residual = *(min_residualval.cpu_rd_ptr()); // FP: "40 -> 41; min_value = *(min_valueval.cpu_rd_ptr()); // FP: "41 -> 42; } void PageRankSanity_allNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, 
struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(0, ctx->gg.nnodes, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; } void PageRankSanity_masterNodes_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; } void PageRankSanity_nodesWithEdges_cuda(uint64_t & DGAccumulator_residual_over_tolerance, float & DGAccumulator_sum, float & DGAccumulator_sum_residual, float & max_residual, float & max_value, float & min_residual, float & min_value, float local_tolerance, struct CUDA_Context* ctx) { // FP: "1 -> 2; PageRankSanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_residual_over_tolerance, DGAccumulator_sum, DGAccumulator_sum_residual, max_residual, max_value, min_residual, min_value, local_tolerance, ctx); // FP: "2 -> 3; }
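A minimal fused sketch of the push-style update that PageRank_delta and PageRank implement together (hypothetical kernel over a CSR graph; the generated code splits the two phases, stages the per-node delta in p_delta, and uses atomicTestAdd plus a dirty bitset instead of the plain atomicAdd used here):

__global__ void pagerank_push_sketch(int nnodes, const int *row_ptr, const int *col_idx,
                                     const unsigned int *nout, float alpha, float tolerance,
                                     float *residual, float *value) {
  int src = blockIdx.x * blockDim.x + threadIdx.x;
  if (src >= nnodes) return;
  float r = residual[src];                          // residual_old
  if (r <= 0.0f) return;
  residual[src] = 0.0f;
  value[src] += r;                                  // p_value[src] += residual_old
  if (r > tolerance && nout[src] > 0) {
    float delta = r * (1.0f - alpha) / nout[src];   // p_delta[src] in PageRank_delta
    for (int e = row_ptr[src]; e < row_ptr[src + 1]; ++e)
      atomicAdd(&residual[col_idx[e]], delta);      // pushed to each out-neighbour in PageRank
  }
}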
e215dfc3f3a1565a99f3e2aa8078e97917c2f3e4.hip
// !!! This is a file automatically generated by hipify!!! /* * Cuda5StepConvolutionBusiness.cpp * * Created on: 15/06/2012 * Author: jose */ #include "Cuda5StepConvolutionBusiness.cuh" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cutil.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <QTime> #include "./src/common/Controlador.h" Cuda5StepConvolutionBusiness::Cuda5StepConvolutionBusiness() { // TODO Auto-generated constructor stub } Cuda5StepConvolutionBusiness::~Cuda5StepConvolutionBusiness() { // TODO Auto-generated destructor stub } //Macro for fast integer multiplication #define IMUL5(a, b) __mul24(a, b) //////////////////////////////////////////////////////////////////////////////// // Kernel configuration //////////////////////////////////////////////////////////////////////////////// #define KERNEL_RADIUS5 1 #define KERNEL_W5 (2 * KERNEL_RADIUS5 + 1) __device__ __constant__ float d_Kernel1[KERNEL_W5]; __device__ __constant__ float d_Kernel2[KERNEL_W5]; // Assuming ROW_TILE_W, KERNEL_RADIUS5_ALIGNED and dataW // are multiples of coalescing granularity size, // all global memory operations are coalesced in convolutionRowGPU() #define ROW_TILE_W 128 #define KERNEL_RADIUS5_ALIGNED 16 // Assuming COLUMN_TILE_W and dataW are multiples // of coalescing granularity size, all global memory operations // are coalesced in convolutionColumnGPU() #define COLUMN_TILE_W 16 #define COLUMN_TILE_H 48 //////////////////////////////////////////////////////////////////////////////// // Loop unrolling templates, needed for best performance //////////////////////////////////////////////////////////////////////////////// template<int i> __device__ float convolutionRow(float *data) { return data[KERNEL_RADIUS5 - i] * d_Kernel1[i] + convolutionRow<i - 1>(data); } template<> __device__ float convolutionRow<-1>(float *data) { return 0; } template<int i> __device__ float convolutionColumn(float *data) { return data[(KERNEL_RADIUS5 - i) * COLUMN_TILE_W] * d_Kernel2[i] + convolutionColumn<i - 1>(data); } template<> __device__ float convolutionColumn<-1>(float *data) { return 0; } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowGPU5(float *d_Result, float *d_Data, int dataW, int dataH) { //Data cache __shared__ float data[KERNEL_RADIUS5 + ROW_TILE_W + KERNEL_RADIUS5]; //Current tile and apron limits, relative to row start int tileStart = IMUL5(blockIdx.x, ROW_TILE_W); int tileEnd = tileStart + ROW_TILE_W - 1; int apronStart = tileStart - KERNEL_RADIUS5; int apronEnd = tileEnd + KERNEL_RADIUS5; //Clamp tile and apron limits by image borders int tileEndClamped = min(tileEnd, dataW - 1); int apronStartClamped = max(apronStart, 0); int apronEndClamped = min(apronEnd, dataW - 1); //Row start index in d_Data[] int rowStart = IMUL5(blockIdx.y, dataW); //Aligned apron start. Assuming dataW and ROW_TILE_W are multiples //of half-warp size, rowStart + apronStartAligned is also a //multiple of half-warp size, thus having proper alignment //for coalesced d_Data[] read.
int apronStartAligned = tileStart - KERNEL_RADIUS5_ALIGNED; int loadPos = apronStartAligned + threadIdx.x; //Set the entire data cache contents //Load global memory values, if indices are within the image borders, //or initialize with zeroes otherwise if (loadPos >= apronStart) { int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? d_Data[rowStart + loadPos] : 0; } //Ensure the completness of the loading stage //because results, emitted by each thread depend on the data, //loaded by another threads __syncthreads(); int writePos = tileStart + threadIdx.x; //Assuming dataW and ROW_TILE_W are multiples of half-warp size, //rowStart + tileStart is also a multiple of half-warp size, //thus having proper alignment for coalesced d_Result[] write. if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float sum = 0; #ifdef UNROLL_INNER sum = convolutionRow<2 * KERNEL_RADIUS5>(data + smemPos); #else for (int k = -KERNEL_RADIUS5; k <= KERNEL_RADIUS5; k++) sum += data[smemPos + k] * d_Kernel1[KERNEL_RADIUS5 - k]; #endif d_Result[rowStart + writePos] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionColumnGPU5(float *d_Result, float *d_Data, int dataW, int dataH, int smemStride, int gmemStride) { //Data cache __shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS5 + COLUMN_TILE_H + KERNEL_RADIUS5)]; //Current tile and apron limits, in rows int tileStart = IMUL5(blockIdx.y, COLUMN_TILE_H); int tileEnd = tileStart + COLUMN_TILE_H - 1; int apronStart = tileStart - KERNEL_RADIUS5; int apronEnd = tileEnd + KERNEL_RADIUS5; //Clamp tile and apron limits by image borders int tileEndClamped = min(tileEnd, dataH - 1); int apronStartClamped = max(apronStart, 0); int apronEndClamped = min(apronEnd, dataH - 1); //Current column index int columnStart = IMUL5(blockIdx.x, COLUMN_TILE_W) + threadIdx.x; //Shared and global memory indices for current column int smemPos = IMUL5(threadIdx.y, COLUMN_TILE_W) + threadIdx.x; int gmemPos = IMUL5(apronStart + threadIdx.y, dataW) + columnStart; //Cycle through the entire data cache //Load global memory values, if indices are within the image borders, //or initialize with zero otherwise for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
d_Data[gmemPos] : 0; smemPos += smemStride; gmemPos += gmemStride; } //Ensure the completness of the loading stage //because results, emitted by each thread depend on the data, //loaded by another threads __syncthreads(); //Shared and global memory indices for current column smemPos = IMUL5(threadIdx.y + KERNEL_RADIUS5, COLUMN_TILE_W) + threadIdx.x; gmemPos = IMUL5(tileStart + threadIdx.y , dataW) + columnStart; //Cycle through the tile body, clamped by image borders //Calculate and output the results for (int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float sum = 0; #ifdef UNROLL_INNER sum = convolutionColumn<2 * KERNEL_RADIUS5>(data + smemPos); #else for (int k = -KERNEL_RADIUS5; k <= KERNEL_RADIUS5; k++) sum += data[smemPos + IMUL5(k, COLUMN_TILE_W)] * d_Kernel2[KERNEL_RADIUS5 - k]; #endif d_Result[gmemPos] = sum; smemPos += smemStride; gmemPos += gmemStride; } } //****************************************************** //****************************************************** //PGMWORKING unsigned int width5, height5; // Image width5 and height5 //////////////////////////////////////////////////////////////////////////////// // Common host and device functions //////////////////////////////////////////////////////////////////////////////// //Round a / b to nearest higher integer value int Cuda5StepConvolutionBusiness::iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int Cuda5StepConvolutionBusiness::iDivDown(int a, int b) { return a / b; } //Align a to nearest higher multiple of b int Cuda5StepConvolutionBusiness::iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int Cuda5StepConvolutionBusiness::iAlignDown(int a, int b) { return a - a % b; } //////////////////////////////////////////////////////////////////////////////// // GPU convolution //////////////////////////////////////////////////////////////////////////////// //Global macro, controlling innermost convolution loop unrolling #define UNROLL_INNER //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// //Image width5 should be aligned to maximum coalesced read/write size //for best global memory performance in both row and column filter. 
int DATA_W5; int DATA_H5; int DATA_SIZE5; int KERNEL_SIZE5 = KERNEL_W5 * sizeof(float); //Carry out dummy calculations before main computation loop //in order to "warm up" the hardware/driver #define WARMUP //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// float* Cuda5StepConvolutionBusiness::convolve(float *imagen, int ancho, int alto, float *h_kernel1, float *h_kernel2, int tamFilter) { Controlador *controlador = Controlador::Instance(); float *h_DataB, *h_ResultGPU; float *d_DataA, *d_DataB; double rCPU,rGPUsum_delta, sum_ref, L1norm, gpuTime; DATA_W5 = ancho; DATA_H5 = alto; DATA_SIZE5 = DATA_W5 * DATA_H5 * sizeof(float); //Load the image h_DataB = (float *) malloc(DATA_SIZE5); h_ResultGPU = (float *) malloc(DATA_SIZE5); CUDA_SAFE_CALL(hipMalloc((void **) &d_DataA, DATA_SIZE5)); CUDA_SAFE_CALL(hipMalloc((void **) &d_DataB, DATA_SIZE5)); CUDA_SAFE_CALL(hipMemcpyToSymbol(d_Kernel1, h_kernel1, KERNEL_SIZE5)); CUDA_SAFE_CALL(hipMemcpyToSymbol(d_Kernel2, h_kernel2, KERNEL_SIZE5)); hipMemcpy(d_DataA, imagen, DATA_SIZE5, hipMemcpyHostToDevice); //******TIMER QTime *time = new QTime(); time->start(); //******** dim3 blockGridRows(iDivUp(DATA_W5, ROW_TILE_W), DATA_H5); dim3 blockGridColumns(iDivUp(DATA_W5, COLUMN_TILE_W),iDivUp(DATA_H5, COLUMN_TILE_H)); dim3 threadBlockRows(KERNEL_RADIUS5_ALIGNED + ROW_TILE_W + KERNEL_RADIUS5); dim3 threadBlockColumns(COLUMN_TILE_W, 8); CUDA_SAFE_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( convolutionRowGPU5), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, d_DataB, d_DataA, DATA_W5, DATA_H5 ); CUDA_SAFE_CALL(hipDeviceSynchronize()); hipLaunchKernelGGL(( convolutionColumnGPU5), dim3(blockGridColumns), dim3(threadBlockColumns), 0, 0, d_DataA, d_DataB, DATA_W5, DATA_H5, COLUMN_TILE_W * threadBlockColumns.y, DATA_W5 * threadBlockColumns.y ); CUDA_SAFE_CALL(hipDeviceSynchronize()); int elapsed=time->elapsed(); controlador->setGpuExecutionTime(elapsed+controlador->getGpuExecutionTime()); CUDA_SAFE_CALL(hipMemcpy(h_ResultGPU, d_DataA, DATA_SIZE5, hipMemcpyDeviceToHost)); //Free device memory CUDA_SAFE_CALL(hipFree(d_DataB)); CUDA_SAFE_CALL(hipFree(d_DataA)); //free(h_ResultGPU); free(h_DataB); return h_ResultGPU; }
e215dfc3f3a1565a99f3e2aa8078e97917c2f3e4.cu
/* * Cuda5StepConvolutionBusiness.cpp * * Created on: 15/06/2012 * Author: jose */ #include "Cuda5StepConvolutionBusiness.cuh" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cutil.h> #include <cuda.h> #include <cuda_runtime.h> #include <QTime> #include "./src/common/Controlador.h" Cuda5StepConvolutionBusiness::Cuda5StepConvolutionBusiness() { // TODO Auto-generated constructor stub } Cuda5StepConvolutionBusiness::~Cuda5StepConvolutionBusiness() { // TODO Auto-generated destructor stub } //Macro for fast integer multiplication #define IMUL5(a, b) __mul24(a, b) //////////////////////////////////////////////////////////////////////////////// // Kernel configuration //////////////////////////////////////////////////////////////////////////////// #define KERNEL_RADIUS5 1 #define KERNEL_W5 (2 * KERNEL_RADIUS5 + 1) __device__ __constant__ float d_Kernel1[KERNEL_W5]; __device__ __constant__ float d_Kernel2[KERNEL_W5]; // Assuming ROW_TILE_W, KERNEL_RADIUS5_ALIGNED and dataW // are multiples of coalescing granularity size, // all global memory operations are coalesced in convolutionRowGPU() #define ROW_TILE_W 128 #define KERNEL_RADIUS5_ALIGNED 16 // Assuming COLUMN_TILE_W and dataW are multiples // of coalescing granularity size, all global memory operations // are coalesced in convolutionColumnGPU() #define COLUMN_TILE_W 16 #define COLUMN_TILE_H 48 //////////////////////////////////////////////////////////////////////////////// // Loop unrolling templates, needed for best performance //////////////////////////////////////////////////////////////////////////////// template<int i> __device__ float convolutionRow(float *data) { return data[KERNEL_RADIUS5 - i] * d_Kernel1[i] + convolutionRow<i - 1>(data); } template<> __device__ float convolutionRow<-1>(float *data) { return 0; } template<int i> __device__ float convolutionColumn(float *data) { return data[(KERNEL_RADIUS5 - i) * COLUMN_TILE_W] * d_Kernel2[i] + convolutionColumn<i - 1>(data); } template<> __device__ float convolutionColumn<-1>(float *data) { return 0; } //////////////////////////////////////////////////////////////////////////////// // Row convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionRowGPU5(float *d_Result, float *d_Data, int dataW, int dataH) { //Data cache __shared__ float data[KERNEL_RADIUS5 + ROW_TILE_W + KERNEL_RADIUS5]; //Current tile and apron limits, relative to row start int tileStart = IMUL5(blockIdx.x, ROW_TILE_W); int tileEnd = tileStart + ROW_TILE_W - 1; int apronStart = tileStart - KERNEL_RADIUS5; int apronEnd = tileEnd + KERNEL_RADIUS5; //Clamp tile and apron limits by image borders int tileEndClamped = min(tileEnd, dataW - 1); int apronStartClamped = max(apronStart, 0); int apronEndClamped = min(apronEnd, dataW - 1); //Row start index in d_Data[] int rowStart = IMUL5(blockIdx.y, dataW); //Aligned apron start. Assuming dataW and ROW_TILE_W are multiples //of half-warp size, rowStart + apronStartAligned is also a //multiple of half-warp size, thus having proper alignment //for coalesced d_Data[] read. int apronStartAligned = tileStart - KERNEL_RADIUS5_ALIGNED; int loadPos = apronStartAligned + threadIdx.x; //Set the entire data cache contents //Load global memory values, if indices are within the image borders, //or initialize with zeroes otherwise if (loadPos >= apronStart) { int smemPos = loadPos - apronStart; data[smemPos] = ((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ? 
d_Data[rowStart + loadPos] : 0; } //Ensure the completness of the loading stage //because results, emitted by each thread depend on the data, //loaded by another threads __syncthreads(); int writePos = tileStart + threadIdx.x; //Assuming dataW and ROW_TILE_W are multiples of half-warp size, //rowStart + tileStart is also a multiple of half-warp size, //thus having proper alignment for coalesced d_Result[] write. if (writePos <= tileEndClamped) { const int smemPos = writePos - apronStart; float sum = 0; #ifdef UNROLL_INNER sum = convolutionRow<2 * KERNEL_RADIUS5>(data + smemPos); #else for (int k = -KERNEL_RADIUS5; k <= KERNEL_RADIUS5; k++) sum += data[smemPos + k] * d_Kernel1[KERNEL_RADIUS5 - k]; #endif d_Result[rowStart + writePos] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution filter //////////////////////////////////////////////////////////////////////////////// __global__ void convolutionColumnGPU5(float *d_Result, float *d_Data, int dataW, int dataH, int smemStride, int gmemStride) { //Data cache __shared__ float data[COLUMN_TILE_W * (KERNEL_RADIUS5 + COLUMN_TILE_H + KERNEL_RADIUS5)]; //Current tile and apron limits, in rows int tileStart = IMUL5(blockIdx.y, COLUMN_TILE_H); int tileEnd = tileStart + COLUMN_TILE_H - 1; int apronStart = tileStart - KERNEL_RADIUS5; int apronEnd = tileEnd + KERNEL_RADIUS5; //Clamp tile and apron limits by image borders int tileEndClamped = min(tileEnd, dataH - 1); int apronStartClamped = max(apronStart, 0); int apronEndClamped = min(apronEnd, dataH - 1); //Current column index int columnStart = IMUL5(blockIdx.x, COLUMN_TILE_W) + threadIdx.x; //Shared and global memory indices for current column int smemPos = IMUL5(threadIdx.y, COLUMN_TILE_W) + threadIdx.x; int gmemPos = IMUL5(apronStart + threadIdx.y, dataW) + columnStart; //Cycle through the entire data cache //Load global memory values, if indices are within the image borders, //or initialize with zero otherwise for (int y = apronStart + threadIdx.y; y <= apronEnd; y += blockDim.y) { data[smemPos] = ((y >= apronStartClamped) && (y <= apronEndClamped)) ? 
d_Data[gmemPos] : 0; smemPos += smemStride; gmemPos += gmemStride; } //Ensure the completness of the loading stage //because results, emitted by each thread depend on the data, //loaded by another threads __syncthreads(); //Shared and global memory indices for current column smemPos = IMUL5(threadIdx.y + KERNEL_RADIUS5, COLUMN_TILE_W) + threadIdx.x; gmemPos = IMUL5(tileStart + threadIdx.y , dataW) + columnStart; //Cycle through the tile body, clamped by image borders //Calculate and output the results for (int y = tileStart + threadIdx.y; y <= tileEndClamped; y += blockDim.y) { float sum = 0; #ifdef UNROLL_INNER sum = convolutionColumn<2 * KERNEL_RADIUS5>(data + smemPos); #else for (int k = -KERNEL_RADIUS5; k <= KERNEL_RADIUS5; k++) sum += data[smemPos + IMUL5(k, COLUMN_TILE_W)] * d_Kernel2[KERNEL_RADIUS5 - k]; #endif d_Result[gmemPos] = sum; smemPos += smemStride; gmemPos += gmemStride; } } //****************************************************** //****************************************************** //PGMWORKING unsigned int width5, height5; // Image width5 and height5 //////////////////////////////////////////////////////////////////////////////// // Common host and device functions //////////////////////////////////////////////////////////////////////////////// //Round a / b to nearest higher integer value int Cuda5StepConvolutionBusiness::iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int Cuda5StepConvolutionBusiness::iDivDown(int a, int b) { return a / b; } //Align a to nearest higher multiple of b int Cuda5StepConvolutionBusiness::iAlignUp(int a, int b) { return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int Cuda5StepConvolutionBusiness::iAlignDown(int a, int b) { return a - a % b; } //////////////////////////////////////////////////////////////////////////////// // GPU convolution //////////////////////////////////////////////////////////////////////////////// //Global macro, controlling innermost convolution loop unrolling #define UNROLL_INNER //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// //Image width5 should be aligned to maximum coalesced read/write size //for best global memory performance in both row and column filter. 
int DATA_W5; int DATA_H5; int DATA_SIZE5; int KERNEL_SIZE5 = KERNEL_W5 * sizeof(float); //Carry out dummy calculations before main computation loop //in order to "warm up" the hardware/driver #define WARMUP //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// float* Cuda5StepConvolutionBusiness::convolve(float *imagen, int ancho, int alto, float *h_kernel1, float *h_kernel2, int tamFilter) { Controlador *controlador = Controlador::Instance(); float *h_DataB, *h_ResultGPU; float *d_DataA, *d_DataB; double rCPU,rGPUsum_delta, sum_ref, L1norm, gpuTime; DATA_W5 = ancho; DATA_H5 = alto; DATA_SIZE5 = DATA_W5 * DATA_H5 * sizeof(float); //Load the image h_DataB = (float *) malloc(DATA_SIZE5); h_ResultGPU = (float *) malloc(DATA_SIZE5); CUDA_SAFE_CALL(cudaMalloc((void **) &d_DataA, DATA_SIZE5)); CUDA_SAFE_CALL(cudaMalloc((void **) &d_DataB, DATA_SIZE5)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_Kernel1, h_kernel1, KERNEL_SIZE5)); CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_Kernel2, h_kernel2, KERNEL_SIZE5)); cudaMemcpy(d_DataA, imagen, DATA_SIZE5, cudaMemcpyHostToDevice); //******TIMER QTime *time = new QTime(); time->start(); //******** dim3 blockGridRows(iDivUp(DATA_W5, ROW_TILE_W), DATA_H5); dim3 blockGridColumns(iDivUp(DATA_W5, COLUMN_TILE_W),iDivUp(DATA_H5, COLUMN_TILE_H)); dim3 threadBlockRows(KERNEL_RADIUS5_ALIGNED + ROW_TILE_W + KERNEL_RADIUS5); dim3 threadBlockColumns(COLUMN_TILE_W, 8); CUDA_SAFE_CALL(cudaThreadSynchronize()); convolutionRowGPU5<<<blockGridRows, threadBlockRows>>>( d_DataB, d_DataA, DATA_W5, DATA_H5 ); CUDA_SAFE_CALL(cudaThreadSynchronize()); convolutionColumnGPU5<<<blockGridColumns, threadBlockColumns>>>( d_DataA, d_DataB, DATA_W5, DATA_H5, COLUMN_TILE_W * threadBlockColumns.y, DATA_W5 * threadBlockColumns.y ); CUDA_SAFE_CALL(cudaThreadSynchronize()); int elapsed=time->elapsed(); controlador->setGpuExecutionTime(elapsed+controlador->getGpuExecutionTime()); CUDA_SAFE_CALL(cudaMemcpy(h_ResultGPU, d_DataA, DATA_SIZE5, cudaMemcpyDeviceToHost)); //Free device memory CUDA_SAFE_CALL(cudaFree(d_DataB)); CUDA_SAFE_CALL(cudaFree(d_DataA)); //free(h_ResultGPU); free(h_DataB); return h_ResultGPU; }
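// Illustrative sketch (not part of the original file): the launch geometry convolve()
// builds above, evaluated for a hypothetical 512x512 image. The constants below mirror
// ROW_TILE_W, KERNEL_RADIUS5_ALIGNED, KERNEL_RADIUS5, COLUMN_TILE_W and COLUMN_TILE_H.
#include <cstdio>

static int iDivUpSketch(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }

int main() {
    const int W = 512, H = 512;   // hypothetical image size
    // Row pass: one block per (128-wide row tile, image row); the extra threads load the aligned apron.
    printf("row grid %d x %d, block %d threads\n", iDivUpSketch(W, 128), H, 16 + 128 + 1);
    // Column pass: 16x8 threads per block sweep a 16x48 tile by looping down the column.
    printf("column grid %d x %d, block 16 x 8 threads\n", iDivUpSketch(W, 16), iDivUpSketch(H, 48));
    return 0;
}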
01de2280587b6e05e30d621401be51312fa6bd92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> /* to be set in .bash_rc: PATH=$PATH:/usr/local/cuda-8.0/bin LD_LIBRARY_PATH=/usr/local/cuda-8.0/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} */ __global__ void addKernel(int* c, const int* a, const int* b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(int* c, const int* a, const int* b, int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; // Allocate GPU buffers for three vectors (two input, one output) hipMalloc((void**)&dev_c, size * sizeof(int)); hipMalloc((void**)&dev_a, size * sizeof(int)); hipMalloc((void**)&dev_b, size * sizeof(int)); // Copy input vectors from host memory to GPU buffers. hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. // 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block hipLaunchKernelGGL(( addKernel), dim3(2), dim3((size + 1) / 2), 0, 0, dev_c, dev_a, dev_b, size); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. hipDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); } int main(int argc, char** argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; addWithCuda(c, a, b, arraySize); printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]); hipDeviceReset(); return 0; }
01de2280587b6e05e30d621401be51312fa6bd92.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> /* to be set in .bash_rc: PATH=$PATH:/usr/local/cuda-8.0/bin LD_LIBRARY_PATH=/usr/local/cuda-8.0/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}} */ __global__ void addKernel(int* c, const int* a, const int* b, int size) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < size) { c[i] = a[i] + b[i]; } } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(int* c, const int* a, const int* b, int size) { int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; // Allocate GPU buffers for three vectors (two input, one output) cudaMalloc((void**)&dev_c, size * sizeof(int)); cudaMalloc((void**)&dev_a, size * sizeof(int)); cudaMalloc((void**)&dev_b, size * sizeof(int)); // Copy input vectors from host memory to GPU buffers. cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); // Launch a kernel on the GPU with one thread for each element. // 2 is number of computational blocks and (size + 1) / 2 is a number of threads in a block addKernel<<<2, (size + 1) / 2>>>(dev_c, dev_a, dev_b, size); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaDeviceSynchronize(); // Copy output vector from GPU buffer to host memory. cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); } int main(int argc, char** argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; addWithCuda(c, a, b, arraySize); printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]); cudaDeviceReset(); return 0; }
57296aa6c3d647adbaea3d0d75568f83965c8040.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> // Print device properties void printDevProp(hipDeviceProp_t devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; hipGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
57296aa6c3d647adbaea3d0d75568f83965c8040.cu
#include <stdio.h> // Print device properties void printDevProp(cudaDeviceProp devProp) { printf("Major revision number: %d\n", devProp.major); printf("Minor revision number: %d\n", devProp.minor); printf("Name: %s\n", devProp.name); printf("Total global memory: %zu\n", devProp.totalGlobalMem); printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock); printf("Total registers per block: %d\n", devProp.regsPerBlock); printf("Warp size: %d\n", devProp.warpSize); printf("Maximum memory pitch: %zu\n", devProp.memPitch); printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]); for (int i = 0; i < 3; ++i) printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]); printf("Clock rate: %d\n", devProp.clockRate); printf("Total constant memory: %zu\n", devProp.totalConstMem); printf("Texture alignment: %zu\n", devProp.textureAlignment); printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No")); printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount); printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No")); return; } int main() { // Number of CUDA devices int devCount; cudaGetDeviceCount(&devCount); printf("CUDA Device Query...\n"); printf("There are %d CUDA devices.\n", devCount); // Iterate through devices for (int i = 0; i < devCount; ++i) { // Get device properties printf("\nCUDA Device #%d\n", i); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); printDevProp(devProp); } printf("\nPress any key to exit..."); char c; scanf("%c", &c); return 0; }
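// Hedged sketch (not from the original file): the same device query with the cudaError_t
// return values checked, so a missing driver or device is reported instead of reading an
// uninitialized cudaDeviceProp.
#include <stdio.h>
#include <cuda_runtime.h>

int main() {
    int devCount = 0;
    cudaError_t err = cudaGetDeviceCount(&devCount);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("There are %d CUDA devices.\n", devCount);
    for (int i = 0; i < devCount; ++i) {
        cudaDeviceProp devProp;
        if (cudaGetDeviceProperties(&devProp, i) == cudaSuccess)
            printf("Device #%d: %s\n", i, devProp.name);
    }
    return 0;
}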
96a895437c68a4b0aa7f551485f5842518b4b460.hip
// !!! This is a file automatically generated by hipify!!! // On Maverick2: sbatch mvk2GPUMatMul // nvcc BrodayWalker1B.cu -o BrodayWalker1B.exe //*************************************************************************** // Name: Broday Walker // Instructor: Dr. Colmenares // Class: CMPS 5433 // Date: March 2, 2020 //*************************************************************************** // This program implements matrix multiplication using a GPU on Maverick2. // The program reports the elapsed time taken to complete the matrix // multiplication in milliseconds. It is significantly faster than its // sequential counterpart. //*************************************************************************** #include <stdio.h> #include <hip/hip_runtime.h> enum N {N = 32}; // matmulKernel performs matrix multiplication on a linearized array // This code was given in the slides and adapted for use here __global__ void matmulKernel(int *Ad, int *Bd, int *Cd, int width) { int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0; for (int k = 0; k < width; k++) { int Aelement = Ad[ty * width + k]; int Belement = Bd[k * width + tx]; sum += Aelement * Belement; } Cd[ty * width + tx] = sum; } int main() { // Declarations int A[N * N], B[N * N], C[N * N]; int *Ad, *Bd, *Cd; int size = N * N * sizeof(int); int sum = 0; // Declare the timer // Reference: // https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; // Fill arrays A and C // Array C will be filled with 0s for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { A[i * N + j] = i; C[i * N + j] = 0; } // Fill B int row = N - 1; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) B[i * N + j] = row; row--; } /* Allocate memory and copy to device */ hipMalloc((void**)&Ad, size); hipMemcpy(Ad, A, size, hipMemcpyHostToDevice); hipMalloc((void**)&Bd, size); hipMemcpy(Bd, B, size, hipMemcpyHostToDevice); hipMalloc((void**)&Cd, size); /* End memory allocation and copying to device */ /* Define grid and block dimensions */ dim3 dimGrid( 1, 1 ); dim3 dimBlock( N, N ); /* Record start time */ hipEventRecord(start); /* Invoke the kernel */ hipLaunchKernelGGL(( matmulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Ad, Bd, Cd, N); /* Record end time */ hipEventRecord(stop); /* Copy the matrix multiplication results from device to host */ hipMemcpy(C, Cd, size, hipMemcpyDeviceToHost); /* Block CPU execution until the specified event is recorded */ hipEventSynchronize(stop); /* Returns the elapsed time in milliseconds to the first argument */ hipEventElapsedTime(&milliseconds, start, stop); hipFree(Ad); hipFree(Bd); hipFree(Cd); // Sum the array and print the results for (int i = 0; i < N * N; i++) sum += C[i]; // Print results printf("The summation of all the elements is %d.\n", sum); // Print elapsed time printf("\nElapsed time in milliseconds: %f.\n", milliseconds); return 0; }
96a895437c68a4b0aa7f551485f5842518b4b460.cu
// On Maverick2: sbatch mvk2GPUMatMul // nvcc BrodayWalker1B.cu -o BrodayWalker1B.exe //*************************************************************************** // Name: Broday Walker // Instructor: Dr. Colmenares // Class: CMPS 5433 // Date: March 2, 2020 //*************************************************************************** // This program implements matrix multiplication using a GPU on Maverick2. // The program reports the elapsed time taken to complete the matrix // multiplication in milliseconds. It is significantly faster than its // sequential counterpart. //*************************************************************************** #include <stdio.h> #include <cuda.h> enum N {N = 32}; // matmulKernel performs matrix multiplication on a linearized array // This code was given in the slides and adapted for use here __global__ void matmulKernel(int *Ad, int *Bd, int *Cd, int width) { int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0; for (int k = 0; k < width; k++) { int Aelement = Ad[ty * width + k]; int Belement = Bd[k * width + tx]; sum += Aelement * Belement; } Cd[ty * width + tx] = sum; } int main() { // Declarations int A[N * N], B[N * N], C[N * N]; int *Ad, *Bd, *Cd; int size = N * N * sizeof(int); int sum = 0; // Declare the timer // Reference: // https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; // Fill arrays A and C // Array C will be filled with 0s for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { A[i * N + j] = i; C[i * N + j] = 0; } // Fill B int row = N - 1; for (int i = 0; i < N; i++) { for (int j = 0; j < N; j++) B[i * N + j] = row; row--; } /* Allocate memory and copy to device */ cudaMalloc((void**)&Ad, size); cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice); cudaMalloc((void**)&Bd, size); cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice); cudaMalloc((void**)&Cd, size); /* End memory allocation and copying to device */ /* Define grid and block dimensions */ dim3 dimGrid( 1, 1 ); dim3 dimBlock( N, N ); /* Record start time */ cudaEventRecord(start); /* Invoke the kernel */ matmulKernel<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, N); /* Record end time */ cudaEventRecord(stop); /* Copy the matrix multiplication results from device to host */ cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost); /* Block CPU execution until the specified event is recorded */ cudaEventSynchronize(stop); /* Returns the elapsed time in milliseconds to the first argument */ cudaEventElapsedTime(&milliseconds, start, stop); cudaFree(Ad); cudaFree(Bd); cudaFree(Cd); // Sum the array and print the results for (int i = 0; i < N * N; i++) sum += C[i]; // Print results printf("The summation of all the elements is %d.\n", sum); // Print elapsed time printf("\nElapsed time in milliseconds: %f.\n", milliseconds); return 0; }
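// Illustrative host-side reference (not in the original program): recompute the matrix
// product on the CPU with the same initialization (A[i][j] = i, B[i][j] = N - 1 - i) and
// sum it, so the GPU summation printed above can be cross-checked.
#include <stdio.h>

int main() {
    const int N = 32;
    long long sum = 0;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            long long c = 0;
            for (int k = 0; k < N; k++)
                c += (long long)i * (N - 1 - k);   // A[i][k] * B[k][j]
            sum += c;
        }
    printf("CPU reference summation: %lld\n", sum);
    return 0;
}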
791e40f3347c983f1fa469ccc20ba6ca6122ebf2.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuspatial_test/base_fixture.hpp> #include <cuspatial/error.hpp> #include <cuspatial/trajectory.hpp> struct TrajectoryBoundingBoxesErrorTest : public cuspatial::test::BaseFixture {}; TEST_F(TrajectoryBoundingBoxesErrorTest, SizeMismatch) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryBoundingBoxesErrorTest, TypeError) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not integer auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } { // x-y type mismatch auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<double>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryBoundingBoxesErrorTest, Nulls) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto nulls = rmm::device_uvector<int>(1000, rmm::cuda_stream_default); hipMemsetAsync(nulls.data(), 0xcccc, nulls.size(), rmm::cuda_stream_default.value()); auto nulls_buffer = nulls.release(); id.set_null_mask(nulls_buffer, 4000); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), 
cuspatial::logic_error); } }
791e40f3347c983f1fa469ccc20ba6ca6122ebf2.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuspatial_test/base_fixture.hpp> #include <cuspatial/error.hpp> #include <cuspatial/trajectory.hpp> struct TrajectoryBoundingBoxesErrorTest : public cuspatial::test::BaseFixture {}; TEST_F(TrajectoryBoundingBoxesErrorTest, SizeMismatch) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } { auto id = cudf::column( rmm::device_uvector<int>(size / 2, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryBoundingBoxesErrorTest, TypeError) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); // not integer auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } { // x-y type mismatch auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<double>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } } TEST_F(TrajectoryBoundingBoxesErrorTest, Nulls) { auto const size = 1000; { auto id = cudf::column(rmm::device_uvector<cudf::size_type>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto xs = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto ys = cudf::column( rmm::device_uvector<float>(size, rmm::cuda_stream_default), rmm::device_buffer{}, 0); auto nulls = rmm::device_uvector<int>(1000, rmm::cuda_stream_default); cudaMemsetAsync(nulls.data(), 0xcccc, nulls.size(), rmm::cuda_stream_default.value()); auto nulls_buffer = nulls.release(); id.set_null_mask(nulls_buffer, 4000); EXPECT_THROW(cuspatial::trajectory_bounding_boxes(1, id, xs, ys, this->mr()), cuspatial::logic_error); } }
af2631484308784ff647060878ac8e710c32b4c7.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_vector_types.h> #include <stdint.h> #include <assert.h> __device__ __host__ inline uint32_t binarySearch(uint64_t *values, uint64_t input, uint32_t len) { int32_t imin = 0; int32_t imax = len-1; while (imin <= imax) { uint32_t imid = imin + (imax - imin)/2; if (input < values[imid]) { imax = imid - 1; } else { imin = imid + 1; } } return (uint32_t) imin; } int main() { uint64_t data[] = {1,2,4,6,7,9}; for(int i=0; i<6; i++) { uint32_t result = binarySearch(data, data[i], 6); //printf("%u\n",result); } printf("%u\n",binarySearch(data, 0, 6)); printf("%u\n",binarySearch(data, 1, 6)); printf("%u\n",binarySearch(data, 2, 6)); printf("%u\n",binarySearch(data, 3, 6)); printf("%u\n",binarySearch(data, 4, 6)); printf("%u\n",binarySearch(data, 9, 6)); printf("%u\n",binarySearch(data, 10, 6)); return 0; }
af2631484308784ff647060878ac8e710c32b4c7.cu
#include <stdio.h> #include <stdlib.h> #include <vector_types.h> #include <stdint.h> #include <assert.h> __device__ __host__ inline uint32_t binarySearch(uint64_t *values, uint64_t input, uint32_t len) { int32_t imin = 0; int32_t imax = len-1; while (imin <= imax) { uint32_t imid = imin + (imax - imin)/2; if (input < values[imid]) { imax = imid - 1; } else { imin = imid + 1; } } return (uint32_t) imin; } int main() { uint64_t data[] = {1,2,4,6,7,9}; for(int i=0; i<6; i++) { uint32_t result = binarySearch(data, data[i], 6); //printf("%u\n",result); } printf("%u\n",binarySearch(data, 0, 6)); printf("%u\n",binarySearch(data, 1, 6)); printf("%u\n",binarySearch(data, 2, 6)); printf("%u\n",binarySearch(data, 3, 6)); printf("%u\n",binarySearch(data, 4, 6)); printf("%u\n",binarySearch(data, 9, 6)); printf("%u\n",binarySearch(data, 10, 6)); return 0; }
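// Illustrative host-only check (not in the original file): binarySearch above returns the
// index of the first element strictly greater than `input`, which is the position
// std::upper_bound reports for the same sorted array.
#include <stdio.h>
#include <stdint.h>
#include <algorithm>

static uint32_t upperBoundIndex(const uint64_t *values, uint64_t input, uint32_t len) {
    return (uint32_t)(std::upper_bound(values, values + len, input) - values);
}

int main() {
    uint64_t data[] = {1, 2, 4, 6, 7, 9};
    for (uint64_t q = 0; q <= 10; q++)
        printf("query %llu -> index %u\n", (unsigned long long)q, upperBoundIndex(data, q, 6));
    return 0;
}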
9affa1251abe8fd19efb7008ddeb80ab622fd2ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "glmArray.h" // Copying //////////////////////////////////////////////////////////////////// void copyDeviceToDevice(glmVector<num_t> *destination, glmVector<num_t> *source) { CUDA_WRAP(hipMemcpy(destination->getDeviceData(), source->getDeviceData(), destination->getLength() * sizeof(num_t), hipMemcpyDeviceToDevice)); return; } // Kernels for vector arithmetic ////////////////////////////////////////////// __global__ void vectorSumKernel(int n, num_t *x, num_t *sum) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ num_t sharedX[]; // load data into __shared__ memory num_t xElement = 0.0; if (i < n) { xElement = x[i]; } sharedX[threadIdx.x] = xElement; __syncthreads(); // each loop compresses the data by a factor of 2 for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { sharedX[threadIdx.x] += sharedX[threadIdx.x + offset]; } __syncthreads(); } if (threadIdx.x == 0) { sum[blockIdx.x] = sharedX[0]; } return; } __global__ void vectorAddScalarKernel(int n, num_t *a, num_t b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b; } return; } __global__ void vectorAddKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b[i]; } return; } __global__ void vectorDifferenceKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] - b[i]; } return; } __global__ void vectorMultiplyKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] * b[i]; } return; } __global__ void factorProductKernel(int n, int groupSize, factor_t *factor, num_t *numeric, num_t *result) { unsigned int offset = threadIdx.x; unsigned int i; factor_t factorValue; for (i = offset * groupSize; i < (offset + 1) * groupSize; i++) { if (i < n) { factorValue = factor[i]; if (factorValue > 1) { result[blockDim.x * (factorValue - 2) + offset] += numeric[i]; } } } return; } __global__ void doubleFactorProductKernel(int n, int groupSize, factor_t *factor1, factor_t *factor2, int factor2Size, num_t *numeric, num_t *result) { unsigned int offset = threadIdx.x; unsigned int i; factor_t factor1Value, factor2Value, combinedFactorValue; for (i = offset * groupSize; i < (offset + 1) * groupSize; i++) { if (i < n) { factor1Value = factor1[i]; factor2Value = factor2[i]; if ((factor1Value > 1) && (factor2Value > 1)) { combinedFactorValue = (factor1Value - 2) * factor2Size + (factor2Value - 1); result[blockDim.x * (combinedFactorValue - 1) + offset] += numeric[i]; } } } return; } // Vector Arithmetic Functions //////////////////////////////////////////////// void vectorSumSimple(int length, num_t *input, num_t *output) { int blockCount = length / THREADS_PER_BLOCK + (length % THREADS_PER_BLOCK ? 1 : 0); int sharedMemorySize = THREADS_PER_BLOCK * sizeof(num_t); hipLaunchKernelGGL(( vectorSumKernel), dim3(blockCount), dim3(THREADS_PER_BLOCK), sharedMemorySize, 0, length, input, output); return; } void vectorSumRecursive(int length, num_t *input, num_t *output) { if (length <= THREADS_PER_BLOCK) { // Base case: sum the vector with a single threadblock and save to // the (single) output vectorSumSimple(length, input, output); } else { // Recursive case: allocate space for the partial sums... int tempLength = length / THREADS_PER_BLOCK + (length % THREADS_PER_BLOCK ? 
1 : 0); num_t *tempOutput = NULL; CUDA_WRAP(hipMalloc((void **) &tempOutput, tempLength * sizeof(num_t))); // ...calculate the multiple partial sums... vectorSumSimple(length, input, tempOutput); // ...and then recurse on the partial sums vectorSumRecursive(tempLength, tempOutput, output); CUDA_WRAP(hipFree(tempOutput)); } return; } void vectorSum(glmVector<num_t> *vector, num_t *result) { vectorSumRecursive(vector->getLength(), vector->getDeviceData(), result); return; } void vectorAddScalar(glmVector<num_t> *a, num_t b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); hipLaunchKernelGGL(( vectorAddScalarKernel), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, a->getLength(), a->getDeviceData(), b, c->getDeviceData()); return; } void vectorAdd(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); hipLaunchKernelGGL(( vectorAddKernel), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } void vectorDifference(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); hipLaunchKernelGGL(( vectorDifferenceKernel), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } void vectorMultiply(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); hipLaunchKernelGGL(( vectorMultiplyKernel), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } // Factor Product Host-Side Function ////////////////////////////////////////// void factorProduct(glmVector<factor_t> *factor, int numFactorLevels, glmVector<num_t> *numeric, num_t *result, int stride) { int n = factor->getLength(); int groupSize = n / THREADS_PER_BLOCK + (n % THREADS_PER_BLOCK ? 1 : 0); int tempResultSize = sizeof(num_t) * THREADS_PER_BLOCK * numFactorLevels; num_t *tempResult = NULL; CUDA_WRAP(hipMalloc((void **) &tempResult, tempResultSize)); CUDA_WRAP(hipMemset((void *) tempResult, 0, tempResultSize)); hipLaunchKernelGGL(( factorProductKernel), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, n, groupSize, factor->getDeviceData(), numeric->getDeviceData(), tempResult); for (int i = 0; i < numFactorLevels; i++) { vectorSumSimple(THREADS_PER_BLOCK, tempResult + THREADS_PER_BLOCK * i, result + i * stride); } CUDA_WRAP(hipFree(tempResult)); return; } void doubleFactorProduct(glmVector<factor_t> *factor1, glmVector<factor_t> *factor2, int numFactor1Levels, int numFactor2Levels, glmVector<num_t> *numeric, num_t *result, int stride) { int n = factor1->getLength(); int groupSize = n / THREADS_PER_BLOCK + (n % THREADS_PER_BLOCK ? 
1 : 0); int tempResultSize = sizeof(num_t) * THREADS_PER_BLOCK * numFactor1Levels * numFactor2Levels; num_t *tempResult = NULL; CUDA_WRAP(hipMalloc((void **) &tempResult, tempResultSize)); CUDA_WRAP(hipMemset((void *) tempResult, 0, tempResultSize)); hipLaunchKernelGGL(( doubleFactorProductKernel), dim3(1), dim3(THREADS_PER_BLOCK), 0, 0, n, groupSize, factor1->getDeviceData(), factor2->getDeviceData(), numFactor2Levels, numeric->getDeviceData(), tempResult); for (int factor1Value = 0; factor1Value < numFactor1Levels; factor1Value++) { for (int factor2Value = 0; factor2Value < numFactor2Levels; factor2Value++) { vectorSumSimple(THREADS_PER_BLOCK, tempResult + THREADS_PER_BLOCK * (factor1Value * numFactor2Levels + factor2Value), result + (factor2Value * stride + factor1Value)); } } CUDA_WRAP(hipFree(tempResult)); return; } // Print Functions //////////////////////////////////////////////////////////// std::ostream& operator<<(std::ostream& os, const glmVector<factor_t>& glmVec) { os << "[" << ((int) glmVec.getHostData()[0]); for (int i = 1; i < glmVec.getLength(); i++) { os << ", " << ((int) glmVec.getHostData()[i]); } os << "]"; return os; }; std::ostream& operator<<(std::ostream& os, const glmVector<num_t>& glmVec) { os << "[" << glmVec.getHostData()[0]; for (int i = 1; i < glmVec.getLength(); i++) { os << ", " << glmVec.getHostData()[i]; } os << "]"; return os; }; std::ostream& operator<<(std::ostream& os, const glmMatrix<factor_t>& glmMat) { for (int i = 0; i < glmMat.getNRows(); i++) { if (i == 0) { os << "[["; } else { os << " ["; } os << ((int) glmMat.getHostData()[i]); for (int j = 1; j < glmMat.getNCols(); j++) { os << ", " << ((int) glmMat.getHostData()[i + j * glmMat.getNRows()]); } os << "]"; if (i < glmMat.getNRows() - 1) { os << std::endl; } } os << "]"; return os; } std::ostream& operator<<(std::ostream& os, const glmMatrix<num_t>& glmMat) { for (int rowNum = 0; rowNum < glmMat.getNRows(); rowNum++) { if (rowNum == 0) { os << "[["; } else { os << " ["; } os << glmMat.getHostData()[rowNum]; for (int colNum = 1; colNum < glmMat.getNCols(); colNum++) { os << ", " << glmMat.getHostData()[rowNum + colNum * glmMat.getNRows()]; } os << "]"; if (rowNum < glmMat.getNRows() - 1) { os << std::endl; } } os << "]"; return os; }
9affa1251abe8fd19efb7008ddeb80ab622fd2ea.cu
#include "glmArray.h" // Copying //////////////////////////////////////////////////////////////////// void copyDeviceToDevice(glmVector<num_t> *destination, glmVector<num_t> *source) { CUDA_WRAP(cudaMemcpy(destination->getDeviceData(), source->getDeviceData(), destination->getLength() * sizeof(num_t), cudaMemcpyDeviceToDevice)); return; } // Kernels for vector arithmetic ////////////////////////////////////////////// __global__ void vectorSumKernel(int n, num_t *x, num_t *sum) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ num_t sharedX[]; // load data into __shared__ memory num_t xElement = 0.0; if (i < n) { xElement = x[i]; } sharedX[threadIdx.x] = xElement; __syncthreads(); // each loop compresses the data by a factor of 2 for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (threadIdx.x < offset) { sharedX[threadIdx.x] += sharedX[threadIdx.x + offset]; } __syncthreads(); } if (threadIdx.x == 0) { sum[blockIdx.x] = sharedX[0]; } return; } __global__ void vectorAddScalarKernel(int n, num_t *a, num_t b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b; } return; } __global__ void vectorAddKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] + b[i]; } return; } __global__ void vectorDifferenceKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] - b[i]; } return; } __global__ void vectorMultiplyKernel(int n, num_t *a, num_t *b, num_t *c) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { c[i] = a[i] * b[i]; } return; } __global__ void factorProductKernel(int n, int groupSize, factor_t *factor, num_t *numeric, num_t *result) { unsigned int offset = threadIdx.x; unsigned int i; factor_t factorValue; for (i = offset * groupSize; i < (offset + 1) * groupSize; i++) { if (i < n) { factorValue = factor[i]; if (factorValue > 1) { result[blockDim.x * (factorValue - 2) + offset] += numeric[i]; } } } return; } __global__ void doubleFactorProductKernel(int n, int groupSize, factor_t *factor1, factor_t *factor2, int factor2Size, num_t *numeric, num_t *result) { unsigned int offset = threadIdx.x; unsigned int i; factor_t factor1Value, factor2Value, combinedFactorValue; for (i = offset * groupSize; i < (offset + 1) * groupSize; i++) { if (i < n) { factor1Value = factor1[i]; factor2Value = factor2[i]; if ((factor1Value > 1) && (factor2Value > 1)) { combinedFactorValue = (factor1Value - 2) * factor2Size + (factor2Value - 1); result[blockDim.x * (combinedFactorValue - 1) + offset] += numeric[i]; } } } return; } // Vector Arithmetic Functions //////////////////////////////////////////////// void vectorSumSimple(int length, num_t *input, num_t *output) { int blockCount = length / THREADS_PER_BLOCK + (length % THREADS_PER_BLOCK ? 1 : 0); int sharedMemorySize = THREADS_PER_BLOCK * sizeof(num_t); vectorSumKernel<<<blockCount, THREADS_PER_BLOCK, sharedMemorySize>>>(length, input, output); return; } void vectorSumRecursive(int length, num_t *input, num_t *output) { if (length <= THREADS_PER_BLOCK) { // Base case: sum the vector with a single threadblock and save to // the (single) output vectorSumSimple(length, input, output); } else { // Recursive case: allocate space for the partial sums... int tempLength = length / THREADS_PER_BLOCK + (length % THREADS_PER_BLOCK ? 
1 : 0); num_t *tempOutput = NULL; CUDA_WRAP(cudaMalloc((void **) &tempOutput, tempLength * sizeof(num_t))); // ...calculate the multiple partial sums... vectorSumSimple(length, input, tempOutput); // ...and then recurse on the partial sums vectorSumRecursive(tempLength, tempOutput, output); CUDA_WRAP(cudaFree(tempOutput)); } return; } void vectorSum(glmVector<num_t> *vector, num_t *result) { vectorSumRecursive(vector->getLength(), vector->getDeviceData(), result); return; } void vectorAddScalar(glmVector<num_t> *a, num_t b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); vectorAddScalarKernel<<<numBlocks, THREADS_PER_BLOCK>>>(a->getLength(), a->getDeviceData(), b, c->getDeviceData()); return; } void vectorAdd(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); vectorAddKernel<<<numBlocks, THREADS_PER_BLOCK>>>(a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } void vectorDifference(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); vectorDifferenceKernel<<<numBlocks, THREADS_PER_BLOCK>>>(a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } void vectorMultiply(glmVector<num_t> *a, glmVector<num_t> *b, glmVector<num_t> *c) { int numBlocks = a->getNumBlocks(); vectorMultiplyKernel<<<numBlocks, THREADS_PER_BLOCK>>>(a->getLength(), a->getDeviceData(), b->getDeviceData(), c->getDeviceData()); return; } // Factor Product Host-Side Function ////////////////////////////////////////// void factorProduct(glmVector<factor_t> *factor, int numFactorLevels, glmVector<num_t> *numeric, num_t *result, int stride) { int n = factor->getLength(); int groupSize = n / THREADS_PER_BLOCK + (n % THREADS_PER_BLOCK ? 1 : 0); int tempResultSize = sizeof(num_t) * THREADS_PER_BLOCK * numFactorLevels; num_t *tempResult = NULL; CUDA_WRAP(cudaMalloc((void **) &tempResult, tempResultSize)); CUDA_WRAP(cudaMemset((void *) tempResult, 0, tempResultSize)); factorProductKernel<<<1, THREADS_PER_BLOCK>>>(n, groupSize, factor->getDeviceData(), numeric->getDeviceData(), tempResult); for (int i = 0; i < numFactorLevels; i++) { vectorSumSimple(THREADS_PER_BLOCK, tempResult + THREADS_PER_BLOCK * i, result + i * stride); } CUDA_WRAP(cudaFree(tempResult)); return; } void doubleFactorProduct(glmVector<factor_t> *factor1, glmVector<factor_t> *factor2, int numFactor1Levels, int numFactor2Levels, glmVector<num_t> *numeric, num_t *result, int stride) { int n = factor1->getLength(); int groupSize = n / THREADS_PER_BLOCK + (n % THREADS_PER_BLOCK ? 
1 : 0); int tempResultSize = sizeof(num_t) * THREADS_PER_BLOCK * numFactor1Levels * numFactor2Levels; num_t *tempResult = NULL; CUDA_WRAP(cudaMalloc((void **) &tempResult, tempResultSize)); CUDA_WRAP(cudaMemset((void *) tempResult, 0, tempResultSize)); doubleFactorProductKernel<<<1, THREADS_PER_BLOCK>>>(n, groupSize, factor1->getDeviceData(), factor2->getDeviceData(), numFactor2Levels, numeric->getDeviceData(), tempResult); for (int factor1Value = 0; factor1Value < numFactor1Levels; factor1Value++) { for (int factor2Value = 0; factor2Value < numFactor2Levels; factor2Value++) { vectorSumSimple(THREADS_PER_BLOCK, tempResult + THREADS_PER_BLOCK * (factor1Value * numFactor2Levels + factor2Value), result + (factor2Value * stride + factor1Value)); } } CUDA_WRAP(cudaFree(tempResult)); return; } // Print Functions //////////////////////////////////////////////////////////// std::ostream& operator<<(std::ostream& os, const glmVector<factor_t>& glmVec) { os << "[" << ((int) glmVec.getHostData()[0]); for (int i = 1; i < glmVec.getLength(); i++) { os << ", " << ((int) glmVec.getHostData()[i]); } os << "]"; return os; }; std::ostream& operator<<(std::ostream& os, const glmVector<num_t>& glmVec) { os << "[" << glmVec.getHostData()[0]; for (int i = 1; i < glmVec.getLength(); i++) { os << ", " << glmVec.getHostData()[i]; } os << "]"; return os; }; std::ostream& operator<<(std::ostream& os, const glmMatrix<factor_t>& glmMat) { for (int i = 0; i < glmMat.getNRows(); i++) { if (i == 0) { os << "[["; } else { os << " ["; } os << ((int) glmMat.getHostData()[i]); for (int j = 1; j < glmMat.getNCols(); j++) { os << ", " << ((int) glmMat.getHostData()[i + j * glmMat.getNRows()]); } os << "]"; if (i < glmMat.getNRows() - 1) { os << std::endl; } } os << "]"; return os; } std::ostream& operator<<(std::ostream& os, const glmMatrix<num_t>& glmMat) { for (int rowNum = 0; rowNum < glmMat.getNRows(); rowNum++) { if (rowNum == 0) { os << "[["; } else { os << " ["; } os << glmMat.getHostData()[rowNum]; for (int colNum = 1; colNum < glmMat.getNCols(); colNum++) { os << ", " << glmMat.getHostData()[rowNum + colNum * glmMat.getNRows()]; } os << "]"; if (rowNum < glmMat.getNRows() - 1) { os << std::endl; } } os << "]"; return os; }
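// Host-only model (not part of the original file) of how vectorSumRecursive shrinks the
// problem: each level emits one partial sum per block of THREADS_PER_BLOCK elements until a
// single block remains. THREADS_PER_BLOCK is defined elsewhere in this project; 256 below is
// only an assumed value for illustration.
#include <stdio.h>

int main() {
    const int THREADS_PER_BLOCK = 256;   // assumption; the real constant comes from the project's headers
    int length = 1000000;                // hypothetical vector length
    int level = 0;
    while (length > THREADS_PER_BLOCK) {
        length = length / THREADS_PER_BLOCK + (length % THREADS_PER_BLOCK ? 1 : 0);
        printf("level %d: %d partial sums\n", ++level, length);
    }
    printf("final level: %d values reduced by a single block\n", length);
    return 0;
}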
9970fa384cf8da938c423d6694345befd845e5e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "config.h" template <typename scalar_t> __device__ scalar_t modulated_deform_conv3d_im2col_trilinear(const scalar_t *bottom_data, const int data_width,const int data_length, const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l) { int h_low = floor(h); int w_low = floor(w); int l_low = floor(l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t lh = h - h_low;//dh scalar_t lw = w - w_low;//dw scalar_t ll = l - l_low;//dl scalar_t hh = 1 - lh, hw = 1 - lw, hl = 1 - ll; //1-dh 1-dw 1-dl scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0 && l_low >= 0) v1 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_low]; scalar_t v2 = 0; if (h_low >= 0 && w_low >=0 && l_high<= length -1) v2 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_high]; scalar_t v3 = 0; if (h_low >= 0 && w_high <= width - 1 && l_low >= 0) v3 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_low]; scalar_t v4 = 0; if (h_low >= 0 && w_high <= width - 1 && l_high<= length -1) v4 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_high]; scalar_t v5 = 0; if (h_high <= height -1 && w_low >= 0 && l_low >= 0) v5 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_low]; scalar_t v6 = 0; if (h_high <= height -1 && w_low >= 0 && l_high<= length -1) v6 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_high]; scalar_t v7 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_low >= 0) v7 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_low]; scalar_t v8 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_high<= length -1) v8 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_high]; scalar_t w1 = hh * hw *hl, w2 = hh *hw *ll, w3 = hh * lw*hl, w4 = hh * lw* ll; scalar_t w5 = lh * hw *hl, w6 = lh *hw *ll, w7 = lh * lw*hl, w8 = lh * lw* ll; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); return val; } template <typename scalar_t> __global__ void modulated_deform_conv3d_im2col_gpu_kernel( const int n,const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int length, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int l_col = index % length_col; const int w_col = (index / length_col) % width_col; const int h_col = (index / length_col / width_col ) % height_col; const int b_col = (index / length_col / width_col / height_col) % batch_size; const int c_im = (index / length_col/ width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w* kernel_l; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; const int l_in = l_col * stride_l - pad_l; scalar_t *data_col_ptr = data_col+(c_col*batch_size + b_col) 
*height_col*width_col*length_col+ h_col*width_col*length_col+w_col*length_col+l_col; const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width * length; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 3 * kernel_h * kernel_w * kernel_l * height_col * width_col * length_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w *kernel_l * height_col * width_col * length_col; for (int i = 0; i < kernel_h; ++i){ for (int j = 0; j < kernel_w; ++j){ for (int k = 0; k < kernel_l; ++k){ int f=i*kernel_w*kernel_l + j*kernel_l+k; const int data_offset_h_ptr = (3*f) * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const int data_offset_w_ptr = (3*f+1) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_offset_l_ptr = (3*f+2) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_mask_hwl_ptr = f * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t offset_l = data_offset_ptr[data_offset_l_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hwl_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; const scalar_t l_im = l_in + k * dilation_l + offset_l; if (h_im > -1 && w_im > -1 && l_im > -1 && h_im < height && w_im < width && l_im < length) { val = modulated_deform_conv3d_im2col_trilinear(data_im_ptr, width, length, height, width, length, h_im, w_im,l_im); } *data_col_ptr = val*mask; data_col_ptr += batch_size * height_col * width_col* length_col; }}} } } void modulated_deform_conv3d_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset,const at::Tensor data_mask, const int batch_size,const int channels, const int height_im, const int width_im, const int length_im, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w,const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * height_col * width_col * length_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deform_conv3d_im2col_cuda", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deform_conv3d_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, length_im, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group,batch_size, channels, deformable_group, height_col, width_col, length_col,data_col_); })); hipError_t err = hipGetLastError(); if (err != 
hipSuccess) { printf("error in modulated_deform_conv3d_im2col_cuda: %s\n", hipGetErrorString(err)); } } int modulated_deform_conv3d_forward_cuda( at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor offset, at::Tensor mask, at::Tensor output, const int kernel_h, const int kernel_w, const int kernel_l, const int stride_h, const int stride_w, const int stride_l, const int pad_h, const int pad_w, const int pad_l, const int dilation_h,const int dilation_w, const int dilation_l, const int group, const int deformable_group,const bool with_bias) { TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w,kernel_l, kernel_h_, kernel_w_,kernel_l_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; // resize output output = output.view({batch, channels_out, height_out, width_out,length_out}); output=output.zero_(); // resize temporary columns at::Tensor columns = at::zeros({channels * kernel_h * kernel_w* kernel_l, height_out * width_out*length_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3),output.size(4)}); for (int b = 0; b < batch; b++) { columns.fill_(0); modulated_deform_conv3d_im2col_cuda( input[b], offset[b], mask[b],1, channels, height, width,length, height_out,width_out, length_out, kernel_h, kernel_w,kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3),weight.size(4)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g],1.0f,1.0f) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4),weight.size(5)}); columns =columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4),output.size(5)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1, 1}); } return 0; } template <typename scalar_t> __global__ void modulated_deform_conv3d_gradient_gpu_kernel( const int n,const scalar_t *grad_col, const scalar_t *data_input, const scalar_t *data_offset, const scalar_t *data_mask, scalar_t * columns, const int channels_input, const int height_input, const int width_input, const int length_input, 
const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int offset_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t * grad_input,scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { int f = (index / length_col / width_col / height_col )%(kernel_h * kernel_w * kernel_l); int i=(f / kernel_l / kernel_w) % kernel_h; int j=(f / kernel_l) %kernel_w; int k=f % kernel_l; int lpos_col = index % length_col; int wpos_col = (index / length_col) % width_col; int hpos_col = (index / length_col / width_col) % height_col; int cpos_col = (index / length_col / width_col / height_col); int cpos_in=cpos_col/kernel_h/kernel_w/kernel_l; int offset_group_index=cpos_in/(channels_input/deformable_group); //printf("index %d cpos_col %d hpos_col %d wpos_col %d \n",index,cpos_col,hpos_col,wpos_col); int offset_h_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ 3*f*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_w_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ (3*f+1)*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_l_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ (3*f+2)*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int mask_hwl_ptr=offset_group_index*kernel_h*kernel_w*kernel_l*height_col*width_col*length_col+ f*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; scalar_t offset_h=data_offset[offset_h_ptr]; scalar_t offset_w=data_offset[offset_w_ptr]; scalar_t offset_l=data_offset[offset_l_ptr]; int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h; int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w; int lpos_in = lpos_col * stride_l - pad_l + (k) * dilation_l; auto real_offset_h=hpos_in+offset_h; auto real_offset_w=wpos_in+offset_w; auto real_offset_l=lpos_in+offset_l; int h_low = floor(real_offset_h); int w_low = floor(real_offset_w); int l_low = floor(real_offset_l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t dh = real_offset_h - h_low; scalar_t dw = real_offset_w - w_low; scalar_t dl = real_offset_l - l_low; scalar_t v1 = 0; if (h_low >= 0 && h_low <= height_input -1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) v1 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_low]; scalar_t v2 = 0; if (h_low >= 0 && h_low <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input - 1 && abs(dl)>EPS ) v2 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_high]; scalar_t v3 = 0; if (h_low >= 0 && h_low <= height_input -1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dw)>EPS ) v3 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_low]; scalar_t v4 = 0; if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& 
l_high >= 0 && l_high <= length_input - 1 && abs(dw)>EPS && abs(dl)>EPS) v4 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_high]; scalar_t v5 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dh)>EPS) v5 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_low]; scalar_t v6 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input -1 && abs(dh)>EPS && abs(dl)>EPS ) v6 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_high]; scalar_t v7 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dh)>EPS && abs(dw)>EPS) v7 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_low]; scalar_t v8 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1 && abs(dh)>EPS && abs(dw)>EPS && abs(dl)>EPS ) v8 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_high]; scalar_t w1 = (1-dh) *(1- dw)*(1-dl), w2 =(1- dh) *(1- dw)*dl, w3 = (1-dh)*dw*(1-dl), w4 = (1-dh) * dw*dl; scalar_t w5 = dh *(1- dw)*(1-dl), w6 =dh*(1- dw)*dl, w7 = dh*dw*(1-dl), w8 = dh*dw*dl; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); scalar_t col=val*data_mask[mask_hwl_ptr];// auto dval=data_mask[mask_hwl_ptr]*grad_col[index]; if (h_low >= 0 && h_low <= height_input -1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_low,w1*dval); if (h_low >= 0 && h_low <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_high,w2*dval); if (h_low >= 0 && h_low <= height_input -1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_low,w3*dval); if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_high,w4*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_low,w5*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input -1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_high,w6*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && l_low 
>= 0 && l_low <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_low,w7*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_high,w8*dval); atomicAdd(grad_offset + offset_h_ptr, (-1*(1-dw)*(1-dl)*v1-1*(1-dw)*dl*v2-1*dw*(1-dl)*v3-1*dw*dl*v4+(1-dw)*(1-dl)*v5+(1-dw)*dl*v6+dw*(1-dl)*v7+dw*dl*v8)*dval); atomicAdd(grad_offset + offset_w_ptr, (-1*(1-dh)*(1-dl)*v1-1*(1-dh)*dl*v2+(1-dh)*(1-dl)*v3+(1-dh)*dl*v4-1*dh*(1-dl)*v5-1*dh*dl*v6+dh*(1-dl)*v7+dh*dl*v8)*dval); atomicAdd(grad_offset + offset_l_ptr, (-1*(1-dh)*(1-dw)*v1+(1-dh)*(1-dw)*v2-1*(1-dh)*dw*v3+(1-dh)*dw*v4-1*dh*(1-dw)*v5+dh*(1-dw)*v6-1*dh*dw*v7+dh*dw*v8)*dval); atomicAdd(grad_mask + mask_hwl_ptr,val*grad_col[index]); columns[index]=col; } } // gradient offset mask input void modulated_deform_conv3d_gradient_cuda( const at::Tensor grad_col, const at::Tensor data_input, const at::Tensor data_offset, const at::Tensor data_mask, at::Tensor columns, const int channels, const int height_input, const int width_input, const int length_input, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor grad_input, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels =channels*height_col * width_col * length_col * kernel_h * kernel_w * kernel_l; const int channel_per_deformable_group =3 * kernel_h * kernel_w * kernel_l; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_col.scalar_type(), "modulated_deform_conv3d_gradient_cuda", ([&] { const scalar_t *grad_col_ = grad_col.data<scalar_t>(); const scalar_t *data_input_ = data_input.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *columns_ = columns.data<scalar_t>(); scalar_t *grad_input_ = grad_input.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deform_conv3d_gradient_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, grad_col_, data_input_, data_offset_, data_mask_,columns_, channels, height_input, width_input, length_input, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, channel_per_deformable_group * deformable_group, deformable_group, height_col, width_col, length_col, grad_input_,grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } } int modulated_deform_conv3d_backward_cuda( at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset, at::Tensor mask, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, int kernel_h, int kernel_w, int kernel_l, int stride_h, int stride_w, int stride_l, int pad_h, int pad_w, int pad_l, int dilation_h, int 
dilation_w, int dilation_l, int group, int deformable_group,const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; at::Tensor ones = at::ones({height_out, width_out, length_out}, input.options()); grad_input = grad_input.view({batch, channels, height, width, length}); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, height_out * width_out * length_out}, input.options()); at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w * kernel_l, height_out * width_out * length_out},input.options()); grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3), grad_output.size(4)}); for (int b = 0; b < batch; b++) { // divide int group grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); columns.fill_(0); modulated_deform_conv3d_gradient_cuda( grad_columns, input[b], offset[b], mask[b], columns, channels, height, width, length, height_out, width_out, length_out, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, deformable_group, grad_input[b],grad_offset[b],grad_mask[b]); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); // std::cout<<"columns \n"<<columns<<std::endl; grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3),grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g].flatten(1).addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1),1.0f,1.0f).view_as(grad_weight[g]); if (with_bias) { at::Tensor temp=grad_bias[g].view({-1, 1}); temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f); grad_bias[g] =temp.view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); 
grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4), grad_output.size(5)}); return 0; }
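Two small pieces of arithmetic recur throughout the file above: the trilinear corner weighting applied by modulated_deform_conv3d_im2col_trilinear around a fractional (h, w, l) sample, and the output-extent formula used when sizing height_out/width_out/length_out. The host-only sketch below reproduces both under the assumption that out-of-range corners contribute zero; the names trilinearSample and outSize are hypothetical and are not part of the file above.

// Host-only illustration (hypothetical helper names). trilinearSample mirrors
// the corner-weight scheme (1-dh or dh)*(1-dw or dw)*(1-dl or dl) on a volume
// flattened as h*W*L + w*L + l; corners outside the volume contribute zero.
#include <cmath>
#include <cstdio>
#include <vector>

float trilinearSample(const std::vector<float> &vol, int H, int W, int L,
                      float h, float w, float l) {
    int h0 = (int)std::floor(h), w0 = (int)std::floor(w), l0 = (int)std::floor(l);
    float dh = h - h0, dw = w - w0, dl = l - l0;
    float val = 0.0f;
    for (int a = 0; a < 2; ++a)          // low/high corner along h
        for (int b = 0; b < 2; ++b)      // low/high corner along w
            for (int c = 0; c < 2; ++c)  // low/high corner along l
            {
                int hh = h0 + a, ww = w0 + b, ll = l0 + c;
                if (hh < 0 || hh >= H || ww < 0 || ww >= W || ll < 0 || ll >= L)
                    continue;            // out-of-range corner contributes 0
                float weight = (a ? dh : 1.0f - dh) *
                               (b ? dw : 1.0f - dw) *
                               (c ? dl : 1.0f - dl);
                val += weight * vol[(hh * W + ww) * L + ll];
            }
    return val;
}

// Output extent along one axis, as computed for height_out/width_out/length_out:
// out = (in + 2*pad - (dilation*(kernel-1) + 1)) / stride + 1
int outSize(int in, int pad, int dilation, int kernel, int stride) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
    std::vector<float> vol(4 * 4 * 4, 1.0f);  // constant test volume
    printf("sample = %.3f (expected 1.000)\n",
           trilinearSample(vol, 4, 4, 4, 1.5f, 2.25f, 0.75f));
    printf("out = %d (expected 8)\n",
           outSize(/*in=*/8, /*pad=*/1, /*dilation=*/1, /*kernel=*/3, /*stride=*/1));
    return 0;
}

The second expected value shows why a 3x3x3 kernel with pad 1, dilation 1 and stride 1 preserves the spatial extent of the input.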
9970fa384cf8da938c423d6694345befd845e5e5.cu
#include "config.h" template <typename scalar_t> __device__ scalar_t modulated_deform_conv3d_im2col_trilinear(const scalar_t *bottom_data, const int data_width,const int data_length, const int height, const int width, const int length,scalar_t h, scalar_t w,scalar_t l) { int h_low = floor(h); int w_low = floor(w); int l_low = floor(l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t lh = h - h_low;//dh scalar_t lw = w - w_low;//dw scalar_t ll = l - l_low;//dl scalar_t hh = 1 - lh, hw = 1 - lw, hl = 1 - ll; //1-dh 1-dw 1-dl scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0 && l_low >= 0) v1 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_low]; scalar_t v2 = 0; if (h_low >= 0 && w_low >=0 && l_high<= length -1) v2 = bottom_data[h_low * data_width*data_length + w_low*data_length+ l_high]; scalar_t v3 = 0; if (h_low >= 0 && w_high <= width - 1 && l_low >= 0) v3 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_low]; scalar_t v4 = 0; if (h_low >= 0 && w_high <= width - 1 && l_high<= length -1) v4 = bottom_data[h_low * data_width*data_length + w_high*data_length+ l_high]; scalar_t v5 = 0; if (h_high <= height -1 && w_low >= 0 && l_low >= 0) v5 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_low]; scalar_t v6 = 0; if (h_high <= height -1 && w_low >= 0 && l_high<= length -1) v6 = bottom_data[h_high * data_width*data_length + w_low*data_length+ l_high]; scalar_t v7 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_low >= 0) v7 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_low]; scalar_t v8 = 0; if (h_high <= height -1 && w_high <= width - 1 && l_high<= length -1) v8 = bottom_data[h_high * data_width*data_length + w_high*data_length+ l_high]; scalar_t w1 = hh * hw *hl, w2 = hh *hw *ll, w3 = hh * lw*hl, w4 = hh * lw* ll; scalar_t w5 = lh * hw *hl, w6 = lh *hw *ll, w7 = lh * lw*hl, w8 = lh * lw* ll; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); return val; } template <typename scalar_t> __global__ void modulated_deform_conv3d_im2col_gpu_kernel( const int n,const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask, const int height, const int width, const int length, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t *data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int l_col = index % length_col; const int w_col = (index / length_col) % width_col; const int h_col = (index / length_col / width_col ) % height_col; const int b_col = (index / length_col / width_col / height_col) % batch_size; const int c_im = (index / length_col/ width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w* kernel_l; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; const int l_in = l_col * stride_l - pad_l; scalar_t *data_col_ptr = data_col+(c_col*batch_size + b_col) *height_col*width_col*length_col+ h_col*width_col*length_col+w_col*length_col+l_col; const scalar_t *data_im_ptr 
= data_im + (b_col * num_channels + c_im) * height * width * length; const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 3 * kernel_h * kernel_w * kernel_l * height_col * width_col * length_col; const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w *kernel_l * height_col * width_col * length_col; for (int i = 0; i < kernel_h; ++i){ for (int j = 0; j < kernel_w; ++j){ for (int k = 0; k < kernel_l; ++k){ int f=i*kernel_w*kernel_l + j*kernel_l+k; const int data_offset_h_ptr = (3*f) * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const int data_offset_w_ptr = (3*f+1) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_offset_l_ptr = (3*f+2) * height_col * width_col* length_col + h_col* width_col* length_col + w_col* length_col + l_col; const int data_mask_hwl_ptr = f * height_col * width_col * length_col+ h_col* width_col * length_col+ w_col* length_col + l_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t offset_l = data_offset_ptr[data_offset_l_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hwl_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; const scalar_t l_im = l_in + k * dilation_l + offset_l; if (h_im > -1 && w_im > -1 && l_im > -1 && h_im < height && w_im < width && l_im < length) { val = modulated_deform_conv3d_im2col_trilinear(data_im_ptr, width, length, height, width, length, h_im, w_im,l_im); } *data_col_ptr = val*mask; data_col_ptr += batch_size * height_col * width_col* length_col; }}} } } void modulated_deform_conv3d_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset,const at::Tensor data_mask, const int batch_size,const int channels, const int height_im, const int width_im, const int length_im, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w,const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * height_col * width_col * length_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deform_conv3d_im2col_cuda", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); modulated_deform_conv3d_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, length_im, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group,batch_size, channels, deformable_group, height_col, width_col, length_col,data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deform_conv3d_im2col_cuda: %s\n", cudaGetErrorString(err)); } } int modulated_deform_conv3d_forward_cuda( 
at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor offset, at::Tensor mask, at::Tensor output, const int kernel_h, const int kernel_w, const int kernel_l, const int stride_h, const int stride_w, const int stride_l, const int pad_h, const int pad_w, const int pad_l, const int dilation_h,const int dilation_w, const int dilation_l, const int group, const int deformable_group,const bool with_bias) { TORCH_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); TORCH_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w,kernel_l, kernel_h_, kernel_w_,kernel_l_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; // resize output output = output.view({batch, channels_out, height_out, width_out,length_out}); output=output.zero_(); // resize temporary columns at::Tensor columns = at::zeros({channels * kernel_h * kernel_w* kernel_l, height_out * width_out*length_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3),output.size(4)}); for (int b = 0; b < batch; b++) { columns.fill_(0); modulated_deform_conv3d_im2col_cuda( input[b], offset[b], mask[b],1, channels, height, width,length, height_out,width_out, length_out, kernel_h, kernel_w,kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3),weight.size(4)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g],1.0f,1.0f) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4),weight.size(5)}); columns =columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4),output.size(5)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1, 1}); } return 0; } template <typename scalar_t> __global__ void modulated_deform_conv3d_gradient_gpu_kernel( const int n,const scalar_t *grad_col, const scalar_t *data_input, const scalar_t *data_offset, const scalar_t *data_mask, scalar_t * columns, const int channels_input, const int height_input, const int width_input, const int length_input, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, 
const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int channel_per_deformable_group, const int offset_channels, const int deformable_group, const int height_col, const int width_col, const int length_col, scalar_t * grad_input,scalar_t *grad_offset, scalar_t *grad_mask) { CUDA_KERNEL_LOOP(index, n) { int f = (index / length_col / width_col / height_col )%(kernel_h * kernel_w * kernel_l); int i=(f / kernel_l / kernel_w) % kernel_h; int j=(f / kernel_l) %kernel_w; int k=f % kernel_l; int lpos_col = index % length_col; int wpos_col = (index / length_col) % width_col; int hpos_col = (index / length_col / width_col) % height_col; int cpos_col = (index / length_col / width_col / height_col); int cpos_in=cpos_col/kernel_h/kernel_w/kernel_l; int offset_group_index=cpos_in/(channels_input/deformable_group); //printf("index %d cpos_col %d hpos_col %d wpos_col %d \n",index,cpos_col,hpos_col,wpos_col); int offset_h_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ 3*f*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_w_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ (3*f+1)*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int offset_l_ptr=offset_group_index*channel_per_deformable_group*height_col*width_col*length_col+ (3*f+2)*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; int mask_hwl_ptr=offset_group_index*kernel_h*kernel_w*kernel_l*height_col*width_col*length_col+ f*height_col*width_col*length_col+hpos_col*width_col*length_col+wpos_col*length_col+lpos_col; scalar_t offset_h=data_offset[offset_h_ptr]; scalar_t offset_w=data_offset[offset_w_ptr]; scalar_t offset_l=data_offset[offset_l_ptr]; int hpos_in = hpos_col * stride_h -pad_h + (i) * dilation_h; int wpos_in = wpos_col * stride_w - pad_w + (j) * dilation_w; int lpos_in = lpos_col * stride_l - pad_l + (k) * dilation_l; auto real_offset_h=hpos_in+offset_h; auto real_offset_w=wpos_in+offset_w; auto real_offset_l=lpos_in+offset_l; int h_low = floor(real_offset_h); int w_low = floor(real_offset_w); int l_low = floor(real_offset_l); int h_high = h_low + 1; int w_high = w_low + 1; int l_high = l_low + 1; scalar_t dh = real_offset_h - h_low; scalar_t dw = real_offset_w - w_low; scalar_t dl = real_offset_l - l_low; scalar_t v1 = 0; if (h_low >= 0 && h_low <= height_input -1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) v1 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_low]; scalar_t v2 = 0; if (h_low >= 0 && h_low <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input - 1 && abs(dl)>EPS ) v2 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_high]; scalar_t v3 = 0; if (h_low >= 0 && h_low <= height_input -1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dw)>EPS ) v3 = data_input[cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_low]; scalar_t v4 = 0; if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1 && abs(dw)>EPS && abs(dl)>EPS) v4 = data_input[cpos_in*height_input*width_input*length_input +h_low * 
width_input*length_input + w_high* length_input+l_high]; scalar_t v5 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dh)>EPS) v5 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_low]; scalar_t v6 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input -1 && abs(dh)>EPS && abs(dl)>EPS ) v6 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_high]; scalar_t v7 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 && abs(dh)>EPS && abs(dw)>EPS) v7 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_low]; scalar_t v8 = 0; if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1 && abs(dh)>EPS && abs(dw)>EPS && abs(dl)>EPS ) v8 = data_input[cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_high]; scalar_t w1 = (1-dh) *(1- dw)*(1-dl), w2 =(1- dh) *(1- dw)*dl, w3 = (1-dh)*dw*(1-dl), w4 = (1-dh) * dw*dl; scalar_t w5 = dh *(1- dw)*(1-dl), w6 =dh*(1- dw)*dl, w7 = dh*dw*(1-dl), w8 = dh*dw*dl; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4+w5 * v5 + w6 * v6 + w7 * v7 + w8 * v8); scalar_t col=val*data_mask[mask_hwl_ptr];// auto dval=data_mask[mask_hwl_ptr]*grad_col[index]; if (h_low >= 0 && h_low <= height_input -1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_low,w1*dval); if (h_low >= 0 && h_low <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_low* length_input+l_high,w2*dval); if (h_low >= 0 && h_low <= height_input -1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_low,w3*dval); if (h_low >= 0 && h_low <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_low * width_input*length_input + w_high* length_input+l_high,w4*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_low,w5*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_low >= 0 && w_low <= width_input - 1 && l_high >= 0 && l_high <= length_input -1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_low* length_input+l_high,w6*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1 && l_low >= 0 && l_low <= length_input - 1) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + 
w_high* length_input+l_low,w7*dval); if (h_high >= 0 && h_high <= height_input - 1 && w_high >= 0 && w_high <= width_input - 1&& l_high >= 0 && l_high <= length_input - 1 ) atomicAdd(grad_input+cpos_in*height_input*width_input*length_input +h_high * width_input*length_input + w_high* length_input+l_high,w8*dval); atomicAdd(grad_offset + offset_h_ptr, (-1*(1-dw)*(1-dl)*v1-1*(1-dw)*dl*v2-1*dw*(1-dl)*v3-1*dw*dl*v4+(1-dw)*(1-dl)*v5+(1-dw)*dl*v6+dw*(1-dl)*v7+dw*dl*v8)*dval); atomicAdd(grad_offset + offset_w_ptr, (-1*(1-dh)*(1-dl)*v1-1*(1-dh)*dl*v2+(1-dh)*(1-dl)*v3+(1-dh)*dl*v4-1*dh*(1-dl)*v5-1*dh*dl*v6+dh*(1-dl)*v7+dh*dl*v8)*dval); atomicAdd(grad_offset + offset_l_ptr, (-1*(1-dh)*(1-dw)*v1+(1-dh)*(1-dw)*v2-1*(1-dh)*dw*v3+(1-dh)*dw*v4-1*dh*(1-dw)*v5+dh*(1-dw)*v6-1*dh*dw*v7+dh*dw*v8)*dval); atomicAdd(grad_mask + mask_hwl_ptr,val*grad_col[index]); columns[index]=col; } } // gradient offset mask input void modulated_deform_conv3d_gradient_cuda( const at::Tensor grad_col, const at::Tensor data_input, const at::Tensor data_offset, const at::Tensor data_mask, at::Tensor columns, const int channels, const int height_input, const int width_input, const int length_input, const int height_col, const int width_col, const int length_col, const int kernel_h, const int kernel_w, const int kernel_l, const int pad_h, const int pad_w, const int pad_l, const int stride_h, const int stride_w, const int stride_l, const int dilation_h, const int dilation_w, const int dilation_l, const int deformable_group, at::Tensor grad_input, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels =channels*height_col * width_col * length_col * kernel_h * kernel_w * kernel_l; const int channel_per_deformable_group =3 * kernel_h * kernel_w * kernel_l; AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_col.scalar_type(), "modulated_deform_conv3d_gradient_cuda", ([&] { const scalar_t *grad_col_ = grad_col.data<scalar_t>(); const scalar_t *data_input_ = data_input.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *columns_ = columns.data<scalar_t>(); scalar_t *grad_input_ = grad_input.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); modulated_deform_conv3d_gradient_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, grad_col_, data_input_, data_offset_, data_mask_,columns_, channels, height_input, width_input, length_input, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, channel_per_deformable_group, channel_per_deformable_group * deformable_group, deformable_group, height_col, width_col, length_col, grad_input_,grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } } int modulated_deform_conv3d_backward_cuda( at::Tensor input, at::Tensor weight, at::Tensor bias,at::Tensor offset, at::Tensor mask, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, int kernel_h, int kernel_w, int kernel_l, int stride_h, int stride_w, int stride_l, int pad_h, int pad_w, int pad_l, int dilation_h, int dilation_w, int dilation_l, int group, int deformable_group,const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); 
AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); const int batch = input.size(0); const int channels = input.size(1); const int height = input.size(2); const int width = input.size(3); const int length = input.size(4); const int channels_kernel = weight.size(1); const int kernel_h_ = weight.size(2); const int kernel_w_ = weight.size(3); const int kernel_l_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_l_ != kernel_l) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h_, kernel_w_, kernel_l_, kernel_h, kernel_w, kernel_l); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int length_out = (length + 2 * pad_l - (dilation_l * (kernel_l - 1) + 1)) / stride_l + 1; at::Tensor ones = at::ones({height_out, width_out, length_out}, input.options()); grad_input = grad_input.view({batch, channels, height, width, length}); at::Tensor columns = at::zeros({channels * kernel_h * kernel_w * kernel_l, height_out * width_out * length_out}, input.options()); at::Tensor grad_columns=at::zeros({channels * kernel_h * kernel_w * kernel_l, height_out * width_out * length_out},input.options()); grad_output =grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3), grad_output.size(4)}); for (int b = 0; b < batch; b++) { // divide int group grad_columns = grad_columns.view({group, grad_columns.size(0) / group, grad_columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1),weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { grad_columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } grad_columns = grad_columns.view({grad_columns.size(0) * grad_columns.size(1), grad_columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); columns.fill_(0); modulated_deform_conv3d_gradient_cuda( grad_columns, input[b], offset[b], mask[b], columns, channels, height, width, length, height_out, width_out, length_out, kernel_h, kernel_w, kernel_l, pad_h, pad_w, pad_l, stride_h, stride_w, stride_l, dilation_h, dilation_w, dilation_l, deformable_group, grad_input[b],grad_offset[b],grad_mask[b]); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); // std::cout<<"columns \n"<<columns<<std::endl; grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3),grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g].flatten(1).addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1),1.0f,1.0f).view_as(grad_weight[g]); if (with_bias) { at::Tensor temp=grad_bias[g].view({-1, 1}); temp.addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1}),1.0f,1.0f); grad_bias[g] =temp.view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); 
if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4), grad_output.size(5)}); return 0; }
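In both the forward and backward passes above, the per-group products expressed through view/flatten/addmm_ reduce to an accumulating matrix multiply per group: weight[g] of shape (C_out/groups) x K times columns[g] of shape K x N, where K = (C_in/groups) * kernel_h * kernel_w * kernel_l and N = height_out * width_out * length_out. The following plain host-side sketch of that contraction uses hypothetical names and schematic shapes; it is a reading aid, not part of the extension code.

// CPU sketch of the per-group accumulating product (hypothetical names).
#include <cstdio>
#include <vector>

void groupedMatmulAccumulate(int groups, int coutPerGroup, int K, int N,
                             const std::vector<float> &weight,   // [groups][coutPerGroup][K]
                             const std::vector<float> &columns,  // [groups][K][N]
                             std::vector<float> &output) {       // [groups][coutPerGroup][N]
    for (int g = 0; g < groups; ++g)
        for (int r = 0; r < coutPerGroup; ++r)
            for (int n = 0; n < N; ++n) {
                float acc = 0.0f;
                for (int k = 0; k < K; ++k)
                    acc += weight[(g * coutPerGroup + r) * K + k] *
                           columns[(g * K + k) * N + n];
                // += mirrors addmm_ with beta = 1: the product is added to
                // whatever the output slice already holds.
                output[(g * coutPerGroup + r) * N + n] += acc;
            }
}

int main() {
    int groups = 2, coutPerGroup = 2, K = 3, N = 4;
    std::vector<float> w(groups * coutPerGroup * K, 1.0f);
    std::vector<float> c(groups * K * N, 2.0f);
    std::vector<float> out(groups * coutPerGroup * N, 0.0f);
    groupedMatmulAccumulate(groups, coutPerGroup, K, N, w, c, out);
    printf("out[0] = %.1f (expected %.1f)\n", out[0], (float)K * 2.0f);
    return 0;
}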
c3f16f64740ea4c38c538941e2e8bd1dabb97c48.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <locale.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../custring.cuh" #include "../unicode/unicode_flags.h" #include "../unicode/charcases.h" #include "../util.h" // void printCudaError( hipError_t err, const char* prefix ) { if( err == hipSuccess ) return; fprintf(stderr,"%s: %s(%d):%s\n",prefix,hipGetErrorName(err),(int)err,hipGetErrorString(err)); //hipError_t err2 = hipGetLastError(); // clears the error too //if( err != err2 ) // fprintf(stderr," %s:(%d):%s\n",hipGetErrorName(err2),(int)err2,hipGetErrorString(err2)); } // char32_t* to_char32( const char* ca ) { unsigned int size = (unsigned int)strlen(ca); unsigned int count = custring_view::chars_in_string(ca,size); char32_t* rtn = new char32_t[count+1]; char32_t* optr = rtn; const char* iptr = ca; for( unsigned int i=0; i < size; ++i ) { Char oc = 0; unsigned int cw = custring_view::char_to_Char(iptr,oc); iptr += cw; i += cw - 1; *optr++ = oc; } rtn[count] = 0; return rtn; } // static unsigned char* d_unicode_flags = nullptr; unsigned char* get_unicode_flags() { if( !d_unicode_flags ) { // leave this out of RMM since it is never freed hipMalloc(&d_unicode_flags,65536); hipMemcpy(d_unicode_flags,unicode_flags,65536,hipMemcpyHostToDevice); } return d_unicode_flags; } static unsigned short* d_charcases = nullptr; unsigned short* get_charcases() { if( !d_charcases ) { // leave this out of RMM since it is never freed hipMalloc(&d_charcases,65536*sizeof(unsigned short)); hipMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),hipMemcpyHostToDevice); } return d_charcases; } // NVStringsImpl::NVStringsImpl(unsigned int count) : bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0) { pList = new rmm::device_vector<custring_view*>(count,nullptr); } NVStringsImpl::~NVStringsImpl() { if( memoryBuffer && !bIpcHandle ) RMM_FREE(memoryBuffer,0); if( bIpcHandle ) hipIpcCloseMemHandle(memoryBuffer); memoryBuffer = nullptr; delete pList; pList = nullptr; bufferSize = 0; } char* NVStringsImpl::createMemoryFor( size_t* d_lengths ) { unsigned int count = (unsigned int)pList->size(); auto execpol = rmm::exec_policy(stream_id); bufferSize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count); if( bufferSize==0 ) return 0; // this is valid; all sizes are zero memoryBuffer = device_alloc<char>(bufferSize,stream_id); return memoryBuffer; } // int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count ) { hipError_t err = hipSuccess; auto execpol = rmm::exec_policy(0); // 
first compute the size of each string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); //hoffsets[0] = 0; --already set by this ----^ thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { const char* str = strs[idx]; size_t len = ( str ? (strlen(str)+1) : 0 ); size_t nsz = len; // include null-terminator if( len > 0 ) // len=0 is null, len=1 is empty string { hlengths[idx] = len; // just the string length int nchars = custring_view::chars_in_string(str,(int)len-1); nsz = custring_view::alloc_size((int)len-1,nchars); } nsz = ALIGN_SIZE(nsz); nbytes += nsz; hoffsets[idx+1] = nbytes; } // check if they are all null if( nbytes==0 ) return (int)err; // Host serialization size_t cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); if( !h_flatstrs ) { fprintf(stderr,"init_from_strings: not enough CPU memory for intermediate buffer of size %ld bytes\n", nbytes); return -1; } for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]); // copy to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = hipMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != hipSuccess ) { fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> offsets(hoffsets); rmm::device_vector<size_t> lengths(hlengths); size_t* d_offsets = offsets.data().get(); size_t* d_lengths = lengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; if( len < 1 ) return; // null string size_t offset = d_offsets[idx]; char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1); }); // //err = hipDeviceSynchronize(); //if( err!=hipSuccess ) //{ // fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count); // printCudaError(err); //} pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype ) { hipError_t err = hipSuccess; rmmError_t rerr = RMM_SUCCESS; auto execpol = rmm::exec_policy(0); thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes; if( !bdevmem ) { rerr = RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0); if( rerr == RMM_SUCCESS ) err = hipMemcpyAsync(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,hipMemcpyHostToDevice); } else { // Lets check what we got from the caller by reading all the memory once. // This is wasteful but I cannot keep people from passing bad data: // https://github.com/rapidsai/custrings/issues/191 // This check cannot be done inline below because libraries like thrust may terminate the process // when illegal pointers are passed in. Here we do a pre-check, handle the error and return it. // Do not put any other thrust calls before this line in this method. 
try { thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) custring_view::chars_in_string(str,(unsigned int)bytes); }); err = hipDeviceSynchronize(); // do not remove this } catch( thrust::system_error& exc ) { err = (hipError_t)exc.code().value(); //printf("exception: %d: %s\n", (int)err, e.what()); } } if( err != hipSuccess || rerr != RMM_SUCCESS ) { printCudaError(err,"nvs-idx: checking parms"); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // sort the list - helps reduce divergence if( stype ) { thrust::sort(execpol->on(0), d_indexes, d_indexes + count, [stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) { if( lhs.first==0 || rhs.first==0 ) return rhs.first!=0; // null < non-null int diff = 0; if( stype & NVStrings::length ) diff = (unsigned int)(lhs.second - rhs.second); if( diff==0 && (stype & NVStrings::name) ) diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second); return (diff < 0); }); } // first get the size we need to store these strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_sizes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes)); }); // allocate device memory size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end()); //printf("nvs-idx: %'lu bytes\n",nbytes); if( nbytes==0 ) { if( !bdevmem ) RMM_FREE(d_indexes,0); return 0; // done, all the strings were null } char* d_flatdstrs = nullptr; rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0); if( rerr != RMM_SUCCESS ) { fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr); //printCudaError(err); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build offsets array rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // now build the strings vector custring_view_array d_strings = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){ // add string to internal vector array const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; size_t offset = d_offsets[idx]; char* ptr = d_flatdstrs + offset; custring_view* dstr = 0; if( str ) dstr = custring_view::create_from(ptr,(char*)str,(int)bytes); d_strings[idx] = dstr; d_sizes[idx] = bytes; }); // pImpl->setMemoryBuffer(d_flatdstrs,nbytes); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build strings from pointer and array of offsets int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done hipError_t err = hipSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); thrust::host_vector<size_t> hlengths(count,0); for( int idx=0; idx < count; ++idx ) { int offset 
= offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec bytes = 0; hlengths[idx] = len; nbytes += ALIGN_SIZE(bytes); hoffsets[idx+1] = nbytes; } if( nbytes==0 ) return 0; // should not happen // serialize host memory into a new buffer unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]); // copy whole thing to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = hipMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, hipMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != hipSuccess ) { fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> doffsets(hoffsets); rmm::device_vector<size_t> dlengths(hlengths); size_t* d_offsets = doffsets.data().get(); size_t* d_lengths = dlengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; size_t offset = d_offsets[idx]; size_t size = d_offsets[idx+1] - offset; if( size < 1 ) return; // null string char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,len); }); // pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_device_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done auto execpol = rmm::exec_policy(0); // first compute the size of each string rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [strs, offsets, bitmask, d_sizes] __device__(unsigned int idx){ if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec return; int offset = offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); d_sizes[idx] = ALIGN_SIZE(bytes); }); // copy whole thing to device memory char* d_buffer = pImpl->createMemoryFor(d_sizes); if( !d_buffer ) return 0; // nothing to do // copy offsets and lengths to device memory rmm::device_vector<size_t> out_offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),out_offsets.begin()); size_t* d_out_offsets = out_offsets.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [strs, offsets, bitmask, d_buffer, d_out_offsets, d_strings] __device__(unsigned int idx){ if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) return; // null string int offset = offsets[idx]; int len = 
offsets[idx+1] - offset; const char* in_str = strs + offset; char* out_str = d_buffer + d_out_offsets[idx]; d_strings[idx] = custring_view::create_from(out_str,in_str,len); }); // return 0; } int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStringsImpl*>& strslist ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); size_t nbytes = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) nbytes += (*itr)->getMemorySize(); custring_view_array d_results = pList->data().get(); char* d_buffer = device_alloc<char>(nbytes,0); size_t ptr_offset = 0; size_t buffer_offset = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) { NVStringsImpl* strs = *itr; unsigned int size = strs->getCount(); size_t buffer_size = strs->getMemorySize(); if( size==0 ) continue; rmm::device_vector<custring_view*> strings(size,nullptr); custring_view** d_strings = strings.data().get(); // copy the pointers CUDA_TRY( hipMemcpyAsync( d_strings, strs->getStringsPtr(), size*sizeof(custring_view*), hipMemcpyDeviceToDevice)); if( buffer_size ) { // copy string memory char* baseaddr = strs->getMemoryPtr(); char* buffer = d_buffer + buffer_offset; CUDA_TRY( hipMemcpyAsync(buffer, baseaddr, buffer_size, hipMemcpyDeviceToDevice) ); // adjust pointers custring_view_array results = d_results + ptr_offset; thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size, [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; results[idx] = (custring_view*)newaddr; }); } ptr_offset += size; buffer_offset += buffer_size; } // pImpl->setMemoryBuffer(d_buffer,nbytes); return count; } int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); custring_view_array d_strings = pImpl->getStringsPtr(); //---- the following can be used to find the base-address of the original memory ---- //---- instead of passing it across the ipc boundary; leaving it here for now ---- //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count, // [] __device__ (custring_view* lhs, custring_view* rhs) { // return (lhs && rhs) ? (lhs < rhs) : rhs==0; // }); //hipError_t err = hipMemcpy(&baseaddr,first,sizeof(custring_view*),hipMemcpyDeviceToHost); //if( err!=hipSuccess ) // fprintf(stderr, "fixup: hipMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // char* buffer = pImpl->getMemoryPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [buffer, baseaddr, d_strings] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; d_strings[idx] = (custring_view*)newaddr; }); //hipError_t err = hipDeviceSynchronize(); //if( err!=hipSuccess ) // printCudaError(err,"nvs-fixup"); return count; }
c3f16f64740ea4c38c538941e2e8bd1dabb97c48.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <exception> #include <locale.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/for_each.h> #include <thrust/extrema.h> #include <thrust/sort.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <utilities/error_utils.hpp> #include "nvstrings/NVStrings.h" #include "./NVStringsImpl.h" #include "../custring_view.cuh" #include "../custring.cuh" #include "../unicode/unicode_flags.h" #include "../unicode/charcases.h" #include "../util.h" // void printCudaError( cudaError_t err, const char* prefix ) { if( err == cudaSuccess ) return; fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err)); //cudaError_t err2 = cudaGetLastError(); // clears the error too //if( err != err2 ) // fprintf(stderr," %s:(%d):%s\n",cudaGetErrorName(err2),(int)err2,cudaGetErrorString(err2)); } // char32_t* to_char32( const char* ca ) { unsigned int size = (unsigned int)strlen(ca); unsigned int count = custring_view::chars_in_string(ca,size); char32_t* rtn = new char32_t[count+1]; char32_t* optr = rtn; const char* iptr = ca; for( unsigned int i=0; i < size; ++i ) { Char oc = 0; unsigned int cw = custring_view::char_to_Char(iptr,oc); iptr += cw; i += cw - 1; *optr++ = oc; } rtn[count] = 0; return rtn; } // static unsigned char* d_unicode_flags = nullptr; unsigned char* get_unicode_flags() { if( !d_unicode_flags ) { // leave this out of RMM since it is never freed cudaMalloc(&d_unicode_flags,65536); cudaMemcpy(d_unicode_flags,unicode_flags,65536,cudaMemcpyHostToDevice); } return d_unicode_flags; } static unsigned short* d_charcases = nullptr; unsigned short* get_charcases() { if( !d_charcases ) { // leave this out of RMM since it is never freed cudaMalloc(&d_charcases,65536*sizeof(unsigned short)); cudaMemcpy(d_charcases,charcases,65536*sizeof(unsigned short),cudaMemcpyHostToDevice); } return d_charcases; } // NVStringsImpl::NVStringsImpl(unsigned int count) : bufferSize(0), memoryBuffer(nullptr), bIpcHandle(false), stream_id(0) { pList = new rmm::device_vector<custring_view*>(count,nullptr); } NVStringsImpl::~NVStringsImpl() { if( memoryBuffer && !bIpcHandle ) RMM_FREE(memoryBuffer,0); if( bIpcHandle ) cudaIpcCloseMemHandle(memoryBuffer); memoryBuffer = nullptr; delete pList; pList = nullptr; bufferSize = 0; } char* NVStringsImpl::createMemoryFor( size_t* d_lengths ) { unsigned int count = (unsigned int)pList->size(); auto execpol = rmm::exec_policy(stream_id); bufferSize = thrust::reduce(execpol->on(stream_id), d_lengths, d_lengths+count); if( bufferSize==0 ) return 0; // this is valid; all sizes are zero memoryBuffer = device_alloc<char>(bufferSize,stream_id); return memoryBuffer; } // int NVStrings_init_from_strings(NVStringsImpl* pImpl, const char** strs, unsigned int count ) { cudaError_t err = cudaSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t 
nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); //hoffsets[0] = 0; --already set by this ----^ thrust::host_vector<size_t> hlengths(count,0); for( unsigned int idx=0; idx < count; ++idx ) { const char* str = strs[idx]; size_t len = ( str ? (strlen(str)+1) : 0 ); size_t nsz = len; // include null-terminator if( len > 0 ) // len=0 is null, len=1 is empty string { hlengths[idx] = len; // just the string length int nchars = custring_view::chars_in_string(str,(int)len-1); nsz = custring_view::alloc_size((int)len-1,nchars); } nsz = ALIGN_SIZE(nsz); nbytes += nsz; hoffsets[idx+1] = nbytes; } // check if they are all null if( nbytes==0 ) return (int)err; // Host serialization size_t cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); if( !h_flatstrs ) { fprintf(stderr,"init_from_strings: not enough CPU memory for intermediate buffer of size %ld bytes\n", nbytes); return -1; } for( unsigned int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs[idx], hlengths[idx]); // copy to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = cudaMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != cudaSuccess ) { fprintf(stderr,"nvs-sts: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> offsets(hoffsets); rmm::device_vector<size_t> lengths(hlengths); size_t* d_offsets = offsets.data().get(); size_t* d_lengths = lengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; if( len < 1 ) return; // null string size_t offset = d_offsets[idx]; char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,(int)len-1); }); // //err = cudaDeviceSynchronize(); //if( err!=cudaSuccess ) //{ // fprintf(stderr,"nvs-sts: sync=%d copy %'u strings\n",(int)err,count); // printCudaError(err); //} pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_indexes( NVStringsImpl* pImpl, std::pair<const char*,size_t>* indexes, unsigned int count, bool bdevmem, NVStrings::sorttype stype ) { cudaError_t err = cudaSuccess; rmmError_t rerr = RMM_SUCCESS; auto execpol = rmm::exec_policy(0); thrust::pair<const char*,size_t>* d_indexes = (thrust::pair<const char*,size_t>*)indexes; if( !bdevmem ) { rerr = RMM_ALLOC(&d_indexes,sizeof(std::pair<const char*,size_t>)*count,0); if( rerr == RMM_SUCCESS ) err = cudaMemcpyAsync(d_indexes,indexes,sizeof(std::pair<const char*,size_t>)*count,cudaMemcpyHostToDevice); } else { // Lets check what we got from the caller by reading all the memory once. // This is wasteful but I cannot keep people from passing bad data: // https://github.com/rapidsai/custrings/issues/191 // This check cannot be done inline below because libraries like thrust may terminate the process // when illegal pointers are passed in. Here we do a pre-check, handle the error and return it. // Do not put any other thrust calls before this line in this method. 
try { thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) custring_view::chars_in_string(str,(unsigned int)bytes); }); err = cudaDeviceSynchronize(); // do not remove this } catch( thrust::system_error& exc ) { err = (cudaError_t)exc.code().value(); //printf("exception: %d: %s\n", (int)err, e.what()); } } if( err != cudaSuccess || rerr != RMM_SUCCESS ) { printCudaError(err,"nvs-idx: checking parms"); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // sort the list - helps reduce divergence if( stype ) { thrust::sort(execpol->on(0), d_indexes, d_indexes + count, [stype] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) { if( lhs.first==0 || rhs.first==0 ) return rhs.first!=0; // null < non-null int diff = 0; if( stype & NVStrings::length ) diff = (unsigned int)(lhs.second - rhs.second); if( diff==0 && (stype & NVStrings::name) ) diff = custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second); return (diff < 0); }); } // first get the size we need to store these strings rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_sizes] __device__ (unsigned int idx) { const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; if( str ) d_sizes[idx] = ALIGN_SIZE(custring_view::alloc_size((char*)str,(int)bytes)); }); // allocate device memory size_t nbytes = thrust::reduce(execpol->on(0),sizes.begin(),sizes.end()); //printf("nvs-idx: %'lu bytes\n",nbytes); if( nbytes==0 ) { if( !bdevmem ) RMM_FREE(d_indexes,0); return 0; // done, all the strings were null } char* d_flatdstrs = nullptr; rerr = RMM_ALLOC(&d_flatdstrs,nbytes,0); if( rerr != RMM_SUCCESS ) { fprintf(stderr,"nvs-idx: RMM_ALLOC(%p,%lu)=%d\n", d_flatdstrs,nbytes,(int)rerr); //printCudaError(err); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build offsets array rmm::device_vector<size_t> offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),offsets.begin()); // now build the strings vector custring_view_array d_strings = pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_indexes, d_flatdstrs, d_offsets, d_sizes, d_strings] __device__(unsigned int idx){ // add string to internal vector array const char* str = d_indexes[idx].first; size_t bytes = d_indexes[idx].second; size_t offset = d_offsets[idx]; char* ptr = d_flatdstrs + offset; custring_view* dstr = 0; if( str ) dstr = custring_view::create_from(ptr,(char*)str,(int)bytes); d_strings[idx] = dstr; d_sizes[idx] = bytes; }); // pImpl->setMemoryBuffer(d_flatdstrs,nbytes); if( !bdevmem ) RMM_FREE(d_indexes,0); return (int)err; } // build strings from pointer and array of offsets int NVStrings_init_from_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done cudaError_t err = cudaSuccess; auto execpol = rmm::exec_policy(0); // first compute the size of each string size_t nbytes = 0; thrust::host_vector<size_t> hoffsets(count+1,0); thrust::host_vector<size_t> hlengths(count,0); for( int idx=0; idx < count; ++idx ) { int 
offset = offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec bytes = 0; hlengths[idx] = len; nbytes += ALIGN_SIZE(bytes); hoffsets[idx+1] = nbytes; } if( nbytes==0 ) return 0; // should not happen // serialize host memory into a new buffer unsigned int cheat = 0;//sizeof(custring_view); char* h_flatstrs = (char*)malloc(nbytes); for( int idx = 0; idx < count; ++idx ) memcpy(h_flatstrs + hoffsets[idx] + cheat, strs + offsets[idx], hlengths[idx]); // copy whole thing to device memory char* d_flatstrs = nullptr; rmmError_t rerr = RMM_ALLOC(&d_flatstrs,nbytes,0); if( rerr == RMM_SUCCESS ) err = cudaMemcpyAsync(d_flatstrs, h_flatstrs, nbytes, cudaMemcpyHostToDevice); free(h_flatstrs); // no longer needed if( err != cudaSuccess ) { fprintf(stderr,"nvs-ofs: alloc/copy %'lu bytes\n",nbytes); printCudaError(err); return (int)err; } // copy offsets and lengths to device memory rmm::device_vector<size_t> doffsets(hoffsets); rmm::device_vector<size_t> dlengths(hlengths); size_t* d_offsets = doffsets.data().get(); size_t* d_lengths = dlengths.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_flatstrs, d_offsets, d_lengths, cheat, d_strings] __device__(unsigned int idx){ size_t len = d_lengths[idx]; size_t offset = d_offsets[idx]; size_t size = d_offsets[idx+1] - offset; if( size < 1 ) return; // null string char* ptr = d_flatstrs + offset; char* str = ptr + cheat; d_strings[idx] = custring_view::create_from(ptr,str,len); }); // pImpl->setMemoryBuffer(d_flatstrs,nbytes); return (int)err; } // build strings from array of device pointers and sizes int NVStrings_init_from_device_offsets( NVStringsImpl* pImpl, const char* strs, int count, const int* offsets, const unsigned char* bitmask, int nulls ) { if( count==nulls ) return 0; // if all are nulls then we are done auto execpol = rmm::exec_policy(0); // first compute the size of each string rmm::device_vector<size_t> sizes(count,0); size_t* d_sizes = sizes.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [strs, offsets, bitmask, d_sizes] __device__(unsigned int idx){ if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) // from arrow spec return; int offset = offsets[idx]; int len = offsets[idx+1] - offset; const char* str = strs + offset; int nchars = custring_view::chars_in_string(str,len); int bytes = custring_view::alloc_size(len,nchars); d_sizes[idx] = ALIGN_SIZE(bytes); }); // copy whole thing to device memory char* d_buffer = pImpl->createMemoryFor(d_sizes); if( !d_buffer ) return 0; // nothing to do // copy offsets and lengths to device memory rmm::device_vector<size_t> out_offsets(count,0); thrust::exclusive_scan(execpol->on(0),sizes.begin(),sizes.end(),out_offsets.begin()); size_t* d_out_offsets = out_offsets.data().get(); // initialize custring objects in device memory custring_view_array d_strings = pImpl->getStringsPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [strs, offsets, bitmask, d_buffer, d_out_offsets, d_strings] __device__(unsigned int idx){ if( bitmask && ((bitmask[idx/8] & (1 << (idx % 8)))==0) ) return; // null string int offset = offsets[idx]; int len = 
offsets[idx+1] - offset; const char* in_str = strs + offset; char* out_str = d_buffer + d_out_offsets[idx]; d_strings[idx] = custring_view::create_from(out_str,in_str,len); }); // return 0; } int NVStrings_copy_strings( NVStringsImpl* pImpl, std::vector<NVStringsImpl*>& strslist ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); size_t nbytes = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) nbytes += (*itr)->getMemorySize(); custring_view_array d_results = pList->data().get(); char* d_buffer = device_alloc<char>(nbytes,0); size_t ptr_offset = 0; size_t buffer_offset = 0; for( auto itr=strslist.begin(); itr!=strslist.end(); itr++ ) { NVStringsImpl* strs = *itr; unsigned int size = strs->getCount(); size_t buffer_size = strs->getMemorySize(); if( size==0 ) continue; rmm::device_vector<custring_view*> strings(size,nullptr); custring_view** d_strings = strings.data().get(); // copy the pointers CUDA_TRY( cudaMemcpyAsync( d_strings, strs->getStringsPtr(), size*sizeof(custring_view*), cudaMemcpyDeviceToDevice)); if( buffer_size ) { // copy string memory char* baseaddr = strs->getMemoryPtr(); char* buffer = d_buffer + buffer_offset; CUDA_TRY( cudaMemcpyAsync(buffer, baseaddr, buffer_size, cudaMemcpyDeviceToDevice) ); // adjust pointers custring_view_array results = d_results + ptr_offset; thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), size, [buffer, baseaddr, d_strings, results] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; results[idx] = (custring_view*)newaddr; }); } ptr_offset += size; buffer_offset += buffer_size; } // pImpl->setMemoryBuffer(d_buffer,nbytes); return count; } int NVStrings_fixup_pointers( NVStringsImpl* pImpl, char* baseaddr ) { auto execpol = rmm::exec_policy(0); auto pList = pImpl->pList; unsigned int count = (unsigned int)pList->size(); custring_view_array d_strings = pImpl->getStringsPtr(); //---- the following can be used to find the base-address of the original memory ---- //---- instead of passing it across the ipc boundary; leaving it here for now ---- //custring_view** first = thrust::min_element(execpol->on(0),d_strings,d_strings+count, // [] __device__ (custring_view* lhs, custring_view* rhs) { // return (lhs && rhs) ? (lhs < rhs) : rhs==0; // }); //cudaError_t err = cudaMemcpy(&baseaddr,first,sizeof(custring_view*),cudaMemcpyDeviceToHost); //if( err!=cudaSuccess ) // fprintf(stderr, "fixup: cudaMemcpy(%p,%p,%d)=%d\n",&baseaddr,first,(int)sizeof(custring_view*),(int)err); // char* buffer = pImpl->getMemoryPtr(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [buffer, baseaddr, d_strings] __device__(unsigned int idx){ char* dstr = (char*)d_strings[idx]; if( !dstr ) return; size_t diff = dstr - baseaddr; char* newaddr = buffer + diff; d_strings[idx] = (custring_view*)newaddr; }); //cudaError_t err = cudaDeviceSynchronize(); //if( err!=cudaSuccess ) // printCudaError(err,"nvs-fixup"); return count; }
32420581c4720946017c76676816559eb38318da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define BLOCK_SIZE_2D 16 #define BLOCK_SIZE_1D 256 #include "utils.h" #include "Exception.h" __device__ float sigmoid(float z) { return 1.0f / (1.0f + __expf(-z)); } __global__ void kernelSigmoidFX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = bias[j] + src[i * n + j]; dest[i * n + j] = sigmoid(z); } } __global__ void kernelSigmoidDX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = bias[j] + src[i * n + j]; dest[i * n + j] = sigmoid(z) * (1 - sigmoid(z)); } } __global__ void fastSigmoidKernel(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float x = src[i * n + j]; dest[i * n + j] = x / (1 + abs(x)); } } __global__ void kernelTanh(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = src[i * n + j]; dest[i * n + j] = tanhf(z); } } __global__ void kernelCopyMatrix(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { dest[i * n + j] = src[i * n + j]; } } __global__ void kernelMatrixAdd(unsigned int m, unsigned int n, float* a, float* b, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = a[absIdx] + b[absIdx]; } } __global__ void kernelMatrixEleMult(unsigned int m, unsigned int n, float* a, float* b, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = a[absIdx] * b[absIdx]; } } __global__ void kernelMatrixPow(unsigned int m, unsigned int n, float* a, float exp, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = powf(a[absIdx], exp); } } __global__ void kernelMatrixSelect(unsigned int n, unsigned int ms, unsigned int ns, unsigned int sm, unsigned int sn, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < sm && j < sn) { unsigned int absIdx = i * sn + j; dest[absIdx] = src[(ms + i) * n + (ns + j)]; } } __global__ void kernelCopyVector(unsigned int length, float* src, float* dest) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < length) { dest[i] = src[i]; } } __global__ void kernelMatrixInsertCol(unsigned int m, unsigned int n, unsigned int col, float val, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { if (j < col) { dest[i * n + j] = src[i * (n - 1) + j]; } else if (j == col) { dest[i * n + j] = val; 
} else { dest[i * n + j] = src[i * (n - 1) + j - 1]; } } } __global__ void kernelMatrixInsertColVector(unsigned int m, unsigned int n, unsigned int col, float* vec, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { if (j < col) { dest[i * n + j] = src[i * (n - 1) + j]; } else if (j == col) { dest[i * n + j] = vec[i]; } else { dest[i * n + j] = src[i * (n - 1) + j - 1]; } } } __global__ void kernelMatrixTranspose(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { dest[j * m + i] = src[i * n + j]; } } void applySigmoidFX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelSigmoidFX), dim3(grid), dim3(block), 0, 0, m, n, src, dest, bias); } void applySigmoidDX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelSigmoidDX), dim3(grid), dim3(block), 0, 0, m, n, src, dest, bias); } void matrixAdd(unsigned int m, unsigned int n, float* a, float* b, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixAdd), dim3(grid), dim3(block), 0, 0, m, n, a, b, dest); } void matrixEleMult(unsigned int m, unsigned int n, float* a, float* b, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixEleMult), dim3(grid), dim3(block), 0, 0, m, n, a, b, dest); } void matrixPow(unsigned int m, unsigned int n, float* a, float exp, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixPow), dim3(grid), dim3(block), 0, 0, m, n, a, exp, dest); } void matrixSelect(unsigned int m, unsigned int n, unsigned int ms, unsigned int ns, unsigned int me, unsigned int ne, float* src, float* dest) { if (ms >= me) { throw Exception( "Invalid argument ms must be lower than me. Got ms: " + to_string(ns) + ", me: " + to_string(me) + "."); } if (ns >= ne) { throw Exception( "Invalid argument ns must be lower than ne. Got ns: " + to_string(ns) + ", ne: " + to_string(ne) + "."); } if (me > m) { throw Exception( "Selection end is out of bounds. Expected <= " + to_string(m) + ", but got " + to_string(me) + " instead."); } if (ne > n) { throw Exception( "Selection end for columns is out of bounds. 
Expected <= " + to_string(n) + ", but got " + to_string(ne) + " instead."); } unsigned int sm = me - ms; //selection rows unsigned int sn = ne - ns; //selection cols dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(sn / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(sm / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixSelect), dim3(grid), dim3(block), 0, 0, n, ms, ns, sm, sn, src, dest); } void copyVector(unsigned int length, float* src, float* dest) { unsigned int block = BLOCK_SIZE_1D; unsigned int grid = (unsigned int) ceil(length / (double) BLOCK_SIZE_1D); hipLaunchKernelGGL(( kernelCopyVector), dim3(grid), dim3(block), 0, 0, length, src, dest); } void copyMatrix(unsigned int m, unsigned int n, float* src, float* dest) { checkCuda(hipMemcpy(dest, src, sizeof(float) * m * n, hipMemcpyDeviceToDevice)); } void matrixInsertCol(unsigned int m, unsigned int n, unsigned int col, float val, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil((n + 1) / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixInsertCol), dim3(grid), dim3(block), 0, 0, m, n + 1, col, val, src, dest); } void matrixInsertColVector(unsigned int m, unsigned int n, unsigned int col, float* vec, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil((n + 1) / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixInsertColVector), dim3(grid), dim3(block), 0, 0, m, n + 1, col, vec, src, dest); } void matrixTranspose(unsigned int m, unsigned int n, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); hipLaunchKernelGGL(( kernelMatrixTranspose), dim3(grid), dim3(block), 0, 0, m, n, src, dest); }
32420581c4720946017c76676816559eb38318da.cu
#define BLOCK_SIZE_2D 16 #define BLOCK_SIZE_1D 256 #include "utils.h" #include "Exception.h" __device__ float sigmoid(float z) { return 1.0f / (1.0f + __expf(-z)); } __global__ void kernelSigmoidFX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = bias[j] + src[i * n + j]; dest[i * n + j] = sigmoid(z); } } __global__ void kernelSigmoidDX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = bias[j] + src[i * n + j]; dest[i * n + j] = sigmoid(z) * (1 - sigmoid(z)); } } __global__ void fastSigmoidKernel(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float x = src[i * n + j]; dest[i * n + j] = x / (1 + abs(x)); } } __global__ void kernelTanh(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { float z = src[i * n + j]; dest[i * n + j] = tanhf(z); } } __global__ void kernelCopyMatrix(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { dest[i * n + j] = src[i * n + j]; } } __global__ void kernelMatrixAdd(unsigned int m, unsigned int n, float* a, float* b, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = a[absIdx] + b[absIdx]; } } __global__ void kernelMatrixEleMult(unsigned int m, unsigned int n, float* a, float* b, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = a[absIdx] * b[absIdx]; } } __global__ void kernelMatrixPow(unsigned int m, unsigned int n, float* a, float exp, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { unsigned int absIdx = i * n + j; dest[absIdx] = powf(a[absIdx], exp); } } __global__ void kernelMatrixSelect(unsigned int n, unsigned int ms, unsigned int ns, unsigned int sm, unsigned int sn, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < sm && j < sn) { unsigned int absIdx = i * sn + j; dest[absIdx] = src[(ms + i) * n + (ns + j)]; } } __global__ void kernelCopyVector(unsigned int length, float* src, float* dest) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < length) { dest[i] = src[i]; } } __global__ void kernelMatrixInsertCol(unsigned int m, unsigned int n, unsigned int col, float val, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { if (j < col) { dest[i * n + j] = src[i * (n - 1) + j]; } else if (j == col) { dest[i * n + j] = val; } else { dest[i * n + j] = src[i * (n - 1) + j - 1]; } } } __global__ void 
kernelMatrixInsertColVector(unsigned int m, unsigned int n, unsigned int col, float* vec, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { if (j < col) { dest[i * n + j] = src[i * (n - 1) + j]; } else if (j == col) { dest[i * n + j] = vec[i]; } else { dest[i * n + j] = src[i * (n - 1) + j - 1]; } } } __global__ void kernelMatrixTranspose(unsigned int m, unsigned int n, float* src, float* dest) { unsigned int i = blockIdx.y * blockDim.y + threadIdx.y; unsigned int j = blockIdx.x * blockDim.x + threadIdx.x; if (i < m && j < n) { dest[j * m + i] = src[i * n + j]; } } void applySigmoidFX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelSigmoidFX<<<grid, block>>>(m, n, src, dest, bias); } void applySigmoidDX(unsigned int m, unsigned int n, float* src, float* dest, float* bias) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelSigmoidDX<<<grid, block>>>(m, n, src, dest, bias); } void matrixAdd(unsigned int m, unsigned int n, float* a, float* b, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixAdd<<<grid, block>>>(m, n, a, b, dest); } void matrixEleMult(unsigned int m, unsigned int n, float* a, float* b, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixEleMult<<<grid, block>>>(m, n, a, b, dest); } void matrixPow(unsigned int m, unsigned int n, float* a, float exp, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixPow<<<grid, block>>>(m, n, a, exp, dest); } void matrixSelect(unsigned int m, unsigned int n, unsigned int ms, unsigned int ns, unsigned int me, unsigned int ne, float* src, float* dest) { if (ms >= me) { throw Exception( "Invalid argument ms must be lower than me. Got ms: " + to_string(ns) + ", me: " + to_string(me) + "."); } if (ns >= ne) { throw Exception( "Invalid argument ns must be lower than ne. Got ns: " + to_string(ns) + ", ne: " + to_string(ne) + "."); } if (me > m) { throw Exception( "Selection end is out of bounds. Expected <= " + to_string(m) + ", but got " + to_string(me) + " instead."); } if (ne > n) { throw Exception( "Selection end for columns is out of bounds. 
Expected <= " + to_string(n) + ", but got " + to_string(ne) + " instead."); } unsigned int sm = me - ms; //selection rows unsigned int sn = ne - ns; //selection cols dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(sn / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(sm / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixSelect<<<grid, block>>>(n, ms, ns, sm, sn, src, dest); } void copyVector(unsigned int length, float* src, float* dest) { unsigned int block = BLOCK_SIZE_1D; unsigned int grid = (unsigned int) ceil(length / (double) BLOCK_SIZE_1D); kernelCopyVector<<<grid, block>>>(length, src, dest); } void copyMatrix(unsigned int m, unsigned int n, float* src, float* dest) { checkCuda(cudaMemcpy(dest, src, sizeof(float) * m * n, cudaMemcpyDeviceToDevice)); } void matrixInsertCol(unsigned int m, unsigned int n, unsigned int col, float val, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil((n + 1) / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixInsertCol<<<grid, block>>>(m, n + 1, col, val, src, dest); } void matrixInsertColVector(unsigned int m, unsigned int n, unsigned int col, float* vec, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil((n + 1) / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixInsertColVector<<<grid, block>>>(m, n + 1, col, vec, src, dest); } void matrixTranspose(unsigned int m, unsigned int n, float* src, float* dest) { dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D); unsigned int blocksX = (unsigned int) ceil(n / (double) BLOCK_SIZE_2D); unsigned int blocksY = (unsigned int) ceil(m / (double) BLOCK_SIZE_2D); dim3 grid(blocksX, blocksY); kernelMatrixTranspose<<<grid, block>>>(m, n, src, dest); }
d7b0e1f23b46368ae8a4ea1e26b7c59972ad03d9.hip
// !!! This is a file automatically generated by hipify!!! // INPUT is ./ODE numThreads N*501 #include <iostream> #include <vector> #include "cuda_complex.hpp" #include <math.h> #include <random> #include <chrono> #include "stopwatch.hpp" #include <hip/hip_runtime.h> const double b_sigma = 0.01; // normal distribution std deviation for b /* set b0 values to increment: # points = (MAX-MIN)/INCR+1 */ const float MIN_b0 = 1.5125; const float MAX_b0 = 1.5225; const float INCR_b0 =0.00001; const int SIZE_b0 = int((MAX_b0-MIN_b0)/INCR_b0)+1; /* set C values to increment: # points = (MAX-MIN)/INCR+1 */ const float MIN_C = 0.0175; const float MAX_C = 0.0225; double INCR_C= 0.0001; int SIZE_C = int((MAX_C-MIN_C)/INCR_C)+1; /* Set resolution in length along device ('time' for ODE solvers) */ const float L_max = 150.0; //const float L_inc = 0.001; const int B_LENGTH = int(L_max) + 1; // length of the b vector //const double d = 0; //const double QC = 1.85*0.25; __global__ void ODE_Kernel(float* db0_in, float* dC_in,float *db_rand, float* dLength,double* dGain, int num_loops, int b_length) { double dt = 0.001; int idx = blockIdx.x*blockDim.x + threadIdx.x; // // could implement shared memory method for interpolating b(x) // if (idx < num_loops) { double b0 = double(db0_in[idx]); double C = double(dC_in[idx]); double QC = 0.25*1.85; // change space charge constant consistent with changes in C but keeping Q fixed double d = 0.0; //state initialization complex<double> x0(0.0,0.0); complex<double> x1(0.0,0.0); complex<double> x2(1.0,0.0); complex<double> i(0,1); // attempted to minimize 150000*4*4 repeated multiplications but made no diffrence or took longer //complex<double> const1 = -i*C*C*C; // Solve ODE using for(int n = 0; n < 150000; n++) { //[ this should be replace with shared memory above // calculate current b int T_low = int(floor(n*dt)); int T_high = T_low + 1; if (T_low >= b_length-1) { T_high = T_low; } double b = b0 + db_rand[T_low] + double(n*dt-T_low)*(db_rand[T_high]-db_rand[T_low]); //] // perhaps k,l,m can be simplified to reduce the number of registers complex<double> k0 = dt*x1; complex<double> l0 = dt*x2; complex<double> m0 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*x0 - 4.0*QC*C*C*x1 - i*C*(b + i*d)*x2); complex<double> k1 = dt*(x1+0.5*l0); complex<double> l1 = dt*(x2+0.5*m0); complex<double> m1 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k0) - 4.0*QC*C*C*(x1+0.5*l0) - i*C*(b + i*d)*(x2+0.5*m0));; complex<double> k2 = dt*(x1+0.5*l1); complex<double> l2 = dt*(x2+0.5*m1); complex<double> m2 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k1) - 4.0*QC*C*C*(x1+0.5*l1) - i*C*(b + i*d)*(x2+0.5*m1));; complex<double> k3 = dt*(x1+0.5*l2); complex<double> l3 = dt*(x2+0.5*m2); complex<double> m3 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k2) - 4.0*QC*C*C*(x1+0.5*l2) - i*C*(b + i*d)*(x2+0.5*m2));; x0 = x0 + 1.0/6.0*(k0 + k1+k1 + k2+k2 + k3); x1 = x1 + 1.0/6.0*(l0 + l1+l1 + l2+l2 + l3); x2 = x2 + 1.0/6.0*(m0 + m1+m1 + m2+m2 + m3); //] // calculate gain(x) double GAIN = ::pow(abs( 1.0/(x2 + 4.0*QC*C*C*x0)),2); // test for best gain if ( dGain[idx] < GAIN) { dLength[idx] = float(n*dt); // store current length if best gain dGain[idx] = GAIN; // store Gain if best gain } } // change gain to dB units dGain[idx] = 10*std::log10(dGain[idx]); } } int main(int argc , char* argv[] ) { int num_threads = 128; //default num_threads if (argc >= 2) { num_threads = atoi(argv[1]); } if (argc >= 3) { SIZE_C = atoi(argv[2]); INCR_C = (MAX_C-MIN_C)/double(SIZE_C-1); } int num_loops = SIZE_b0*SIZE_C; // vectors for input 
data float *b0_in = (float*)malloc(sizeof(float)*SIZE_b0); float *C_in = (float*)malloc(sizeof(float)*SIZE_C); // create b0_in and C_in vectors b0_in[0] = MIN_b0; C_in[0] = MIN_C; for (int b = 1; b < SIZE_b0; b++) { b0_in[b] = b0_in[b-1] + INCR_b0; } for (int c = 1; c < SIZE_C; c++) { C_in[c] = C_in[c-1] + INCR_C; } // vectors for output data float *C_out = (float*)malloc(sizeof(float)*num_loops); float *b0_out = (float*)malloc(sizeof(float)*num_loops); float *b_rand = (float*)malloc(sizeof(float)*B_LENGTH); double *gain_dB_out = (double*)malloc(sizeof(double)*num_loops); float *length_out = (float*)malloc(sizeof(float)*num_loops); // create C_out and b0_out vectors int idx = 0; for (int b = 0; b<SIZE_b0; b++) { for (int c = 0; c<SIZE_C; c++) { b0_out[idx] = b0_in[b]; C_out[idx] = C_in[c]; idx++; } } // set up random b(x) distribution unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::normal_distribution<float> distribution(0,b_sigma); for (int n = 0; n < B_LENGTH; n++) { b_rand[n] = distribution(generator); } // std::cout << SIZE_b0 << ' ' << ' ' << SIZE_C << '\n'; // std::cout << b0_in[0] << ' ' << b0_in[SIZE_b0-1] << '\t' << C_in[0] << '\t' << C_in[SIZE_C -1] << '\n'; // Allocate device memory float *db0_in, *dC_in, *db_rand, *dLength; double *dGain; // START TIMER hipEvent_t startTime, stopTime; float time; hipEventCreate(&startTime); hipEventCreate(&stopTime); hipEventRecord(startTime,0); hipMalloc((void**)&db0_in, sizeof(float)*num_loops); hipMalloc((void**)&dC_in, sizeof(float)*num_loops); hipMalloc((void**)&db_rand, sizeof(float)*B_LENGTH); hipMalloc((void**)&dLength, sizeof(float)*num_loops); hipMalloc((void**)&dGain, sizeof(double)*num_loops); // send all data to GPU hipMemcpy(db0_in, b0_out, sizeof(float)*num_loops, hipMemcpyHostToDevice); hipMemcpy(dC_in, C_out, sizeof(float)*num_loops, hipMemcpyHostToDevice); hipMemcpy(db_rand, b_rand, sizeof(float)*B_LENGTH, hipMemcpyHostToDevice); hipMemset(dGain, 0, sizeof(double)*num_loops); hipMemset(dLength, 0, sizeof(float)*num_loops); // Call Cuda Kernels to solve ODE // set by argv[] //int num_threads = 512; int num_blocks = int(num_loops/num_threads)+1; hipLaunchKernelGGL(( ODE_Kernel), dim3(num_blocks),dim3(num_threads), 0, 0, db0_in,dC_in,db_rand,dLength,dGain, num_loops, B_LENGTH); hipMemcpy(length_out, dLength, sizeof(float)*num_loops, hipMemcpyDeviceToHost); hipMemcpy(gain_dB_out, dGain, sizeof(double)*num_loops, hipMemcpyDeviceToHost); // stop inclusive timer hipEventRecord(stopTime,0); hipEventSynchronize(stopTime); hipEventElapsedTime(&time, startTime, stopTime); hipEventDestroy(startTime); hipEventDestroy(stopTime); // print output data to terminal // std::cout << 'C' << '\t' << "b0" << '\t' << "maxLength" << '\t' << "maxGain" << '\t' << "maxGain[dB]" << std::endl; // std::cout << "---------------------------------------------------" << std::endl; // for(int n = 0; n < num_loops; n++) { // std::cout << C_out[n] << '\t' << b0_out[n] << '\t' << length_out[n] << '\t' << gain_dB_out[n] << '\t'<< n << '\n'; // } //std::cout << "time[ms]" << '\t' << "num_loops" << '\t'<< "ms/loop" << '\t' << "num_threads" << '\n'; std::cout << time << '\t' << num_loops << '\t'<< time / float(num_loops) << '\t' << num_threads << '\n'; // free device memory hipFree(db0_in); hipFree(dC_in); hipFree(db_rand); hipFree(dLength); hipFree(dGain); // free host memory free(length_out); free(gain_dB_out); free(C_in); free(C_out); free(b0_in); free(b0_out); free(b_rand); }
d7b0e1f23b46368ae8a4ea1e26b7c59972ad03d9.cu
// INPUT is ./ODE numThreads N*501 #include <iostream> #include <vector> #include "cuda_complex.hpp" #include <math.h> #include <random> #include <chrono> #include "stopwatch.hpp" #include <cuda.h> const double b_sigma = 0.01; // normal distribution std deviation for b /* set b0 values to increment: # points = (MAX-MIN)/INCR+1 */ const float MIN_b0 = 1.5125; const float MAX_b0 = 1.5225; const float INCR_b0 =0.00001; const int SIZE_b0 = int((MAX_b0-MIN_b0)/INCR_b0)+1; /* set C values to increment: # points = (MAX-MIN)/INCR+1 */ const float MIN_C = 0.0175; const float MAX_C = 0.0225; double INCR_C= 0.0001; int SIZE_C = int((MAX_C-MIN_C)/INCR_C)+1; /* Set resolution in length along device ('time' for ODE solvers) */ const float L_max = 150.0; //const float L_inc = 0.001; const int B_LENGTH = int(L_max) + 1; // length of the b vector //const double d = 0; //const double QC = 1.85*0.25; __global__ void ODE_Kernel(float* db0_in, float* dC_in,float *db_rand, float* dLength,double* dGain, int num_loops, int b_length) { double dt = 0.001; int idx = blockIdx.x*blockDim.x + threadIdx.x; // // could implement shared memory method for interpolating b(x) // if (idx < num_loops) { double b0 = double(db0_in[idx]); double C = double(dC_in[idx]); double QC = 0.25*1.85; // change space charge constant consistent with changes in C but keeping Q fixed double d = 0.0; //state initialization complex<double> x0(0.0,0.0); complex<double> x1(0.0,0.0); complex<double> x2(1.0,0.0); complex<double> i(0,1); // attempted to minimize 150000*4*4 repeated multiplications but made no diffrence or took longer //complex<double> const1 = -i*C*C*C; // Solve ODE using for(int n = 0; n < 150000; n++) { //[ this should be replace with shared memory above // calculate current b int T_low = int(floor(n*dt)); int T_high = T_low + 1; if (T_low >= b_length-1) { T_high = T_low; } double b = b0 + db_rand[T_low] + double(n*dt-T_low)*(db_rand[T_high]-db_rand[T_low]); //] // perhaps k,l,m can be simplified to reduce the number of registers complex<double> k0 = dt*x1; complex<double> l0 = dt*x2; complex<double> m0 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*x0 - 4.0*QC*C*C*x1 - i*C*(b + i*d)*x2); complex<double> k1 = dt*(x1+0.5*l0); complex<double> l1 = dt*(x2+0.5*m0); complex<double> m1 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k0) - 4.0*QC*C*C*(x1+0.5*l0) - i*C*(b + i*d)*(x2+0.5*m0));; complex<double> k2 = dt*(x1+0.5*l1); complex<double> l2 = dt*(x2+0.5*m1); complex<double> m2 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k1) - 4.0*QC*C*C*(x1+0.5*l1) - i*C*(b + i*d)*(x2+0.5*m1));; complex<double> k3 = dt*(x1+0.5*l2); complex<double> l3 = dt*(x2+0.5*m2); complex<double> m3 = dt*(-i*C*C*C*(4.0*QC*(b+i*d)-1.0)*(x0+0.5*k2) - 4.0*QC*C*C*(x1+0.5*l2) - i*C*(b + i*d)*(x2+0.5*m2));; x0 = x0 + 1.0/6.0*(k0 + k1+k1 + k2+k2 + k3); x1 = x1 + 1.0/6.0*(l0 + l1+l1 + l2+l2 + l3); x2 = x2 + 1.0/6.0*(m0 + m1+m1 + m2+m2 + m3); //] // calculate gain(x) double GAIN = std::pow(abs( 1.0/(x2 + 4.0*QC*C*C*x0)),2); // test for best gain if ( dGain[idx] < GAIN) { dLength[idx] = float(n*dt); // store current length if best gain dGain[idx] = GAIN; // store Gain if best gain } } // change gain to dB units dGain[idx] = 10*std::log10(dGain[idx]); } } int main(int argc , char* argv[] ) { int num_threads = 128; //default num_threads if (argc >= 2) { num_threads = atoi(argv[1]); } if (argc >= 3) { SIZE_C = atoi(argv[2]); INCR_C = (MAX_C-MIN_C)/double(SIZE_C-1); } int num_loops = SIZE_b0*SIZE_C; // vectors for input data float *b0_in = (float*)malloc(sizeof(float)*SIZE_b0); float 
*C_in = (float*)malloc(sizeof(float)*SIZE_C); // create b0_in and C_in vectors b0_in[0] = MIN_b0; C_in[0] = MIN_C; for (int b = 1; b < SIZE_b0; b++) { b0_in[b] = b0_in[b-1] + INCR_b0; } for (int c = 1; c < SIZE_C; c++) { C_in[c] = C_in[c-1] + INCR_C; } // vectors for output data float *C_out = (float*)malloc(sizeof(float)*num_loops); float *b0_out = (float*)malloc(sizeof(float)*num_loops); float *b_rand = (float*)malloc(sizeof(float)*B_LENGTH); double *gain_dB_out = (double*)malloc(sizeof(double)*num_loops); float *length_out = (float*)malloc(sizeof(float)*num_loops); // create C_out and b0_out vectors int idx = 0; for (int b = 0; b<SIZE_b0; b++) { for (int c = 0; c<SIZE_C; c++) { b0_out[idx] = b0_in[b]; C_out[idx] = C_in[c]; idx++; } } // set up random b(x) distribution unsigned seed = std::chrono::system_clock::now().time_since_epoch().count(); std::default_random_engine generator (seed); std::normal_distribution<float> distribution(0,b_sigma); for (int n = 0; n < B_LENGTH; n++) { b_rand[n] = distribution(generator); } // std::cout << SIZE_b0 << ' ' << ' ' << SIZE_C << '\n'; // std::cout << b0_in[0] << ' ' << b0_in[SIZE_b0-1] << '\t' << C_in[0] << '\t' << C_in[SIZE_C -1] << '\n'; // Allocate device memory float *db0_in, *dC_in, *db_rand, *dLength; double *dGain; // START TIMER cudaEvent_t startTime, stopTime; float time; cudaEventCreate(&startTime); cudaEventCreate(&stopTime); cudaEventRecord(startTime,0); cudaMalloc((void**)&db0_in, sizeof(float)*num_loops); cudaMalloc((void**)&dC_in, sizeof(float)*num_loops); cudaMalloc((void**)&db_rand, sizeof(float)*B_LENGTH); cudaMalloc((void**)&dLength, sizeof(float)*num_loops); cudaMalloc((void**)&dGain, sizeof(double)*num_loops); // send all data to GPU cudaMemcpy(db0_in, b0_out, sizeof(float)*num_loops, cudaMemcpyHostToDevice); cudaMemcpy(dC_in, C_out, sizeof(float)*num_loops, cudaMemcpyHostToDevice); cudaMemcpy(db_rand, b_rand, sizeof(float)*B_LENGTH, cudaMemcpyHostToDevice); cudaMemset(dGain, 0, sizeof(double)*num_loops); cudaMemset(dLength, 0, sizeof(float)*num_loops); // Call Cuda Kernels to solve ODE // set by argv[] //int num_threads = 512; int num_blocks = int(num_loops/num_threads)+1; ODE_Kernel<<<num_blocks,num_threads>>>(db0_in,dC_in,db_rand,dLength,dGain, num_loops, B_LENGTH); cudaMemcpy(length_out, dLength, sizeof(float)*num_loops, cudaMemcpyDeviceToHost); cudaMemcpy(gain_dB_out, dGain, sizeof(double)*num_loops, cudaMemcpyDeviceToHost); // stop inclusive timer cudaEventRecord(stopTime,0); cudaEventSynchronize(stopTime); cudaEventElapsedTime(&time, startTime, stopTime); cudaEventDestroy(startTime); cudaEventDestroy(stopTime); // print output data to terminal // std::cout << 'C' << '\t' << "b0" << '\t' << "maxLength" << '\t' << "maxGain" << '\t' << "maxGain[dB]" << std::endl; // std::cout << "---------------------------------------------------" << std::endl; // for(int n = 0; n < num_loops; n++) { // std::cout << C_out[n] << '\t' << b0_out[n] << '\t' << length_out[n] << '\t' << gain_dB_out[n] << '\t'<< n << '\n'; // } //std::cout << "time[ms]" << '\t' << "num_loops" << '\t'<< "ms/loop" << '\t' << "num_threads" << '\n'; std::cout << time << '\t' << num_loops << '\t'<< time / float(num_loops) << '\t' << num_threads << '\n'; // free device memory cudaFree(db0_in); cudaFree(dC_in); cudaFree(db_rand); cudaFree(dLength); cudaFree(dGain); // free host memory free(length_out); free(gain_dB_out); free(C_in); free(C_out); free(b0_in); free(b0_out); free(b_rand); }
01d0570d3d4809a85a50c61e91d7455a6f38ccab.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> //srand() //#include <stdbool.h> //#define block 514 extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK); extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK); //(2) device kernel (global-memory version). __global__ void smooth_global(float* b, float* a, int n){ int k = blockIdx.x*blockDim.x+threadIdx.x; if(k==0){ b[k]=(2*a[0]+a[1])*0.25; } else if(k==n-1){ b[k]=(a[n-2]+2*a[n-1])*0.25; } else if(k<n){ b[k]=(a[k-1]+2*a[k]+a[k+1])*0.25; } } //(3) device kernel (shared-memory version). __global__ void smooth_shared(float* b, float* a, int n, int BLOCK){ int base = blockIdx.x*blockDim.x; int t = threadIdx.x; //__shared__ float s[BLOCK+2];//declare shared memory. extern __shared__ float s[];//declare shared memory. //load the main data s[1]~s[BLOCK] // s[0] <-- a[base-1] (left boundary) // s[1] <-- a[base] // s[2] <-- a[base+1] // s[3] <-- a[base+2] // ... // s[BLOCK] <-- a[base+BLOCK-1] // s[BLOCK+1] <-- a[base+BLOCK] (right boundary) if(base+t<n){ s[t+1]=a[base+t]; } if(t==0){ //left boundary. if(base==0){ s[0]=0; } else{ s[0]=a[base-1]; //load the boundary data s[0] & s[BLOCK+1] (handled by just two threads) } } if(t==32){ //*** use a separate warp to make the branch faster *** if(base+BLOCK>=n){ //right boundary. s[n-base+1]=0; } else{ s[BLOCK+1] = a[base+BLOCK]; } } __syncthreads(); //synchronize (make sure shared memory has been written) if(base+t<n){ b[base+t]=(s[t]+2*s[t+1]+s[t+2])*0.25; //output the three-point weighted average } }; extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK) { dim3 block(BLOCK, 1, 1); dim3 grid(n/BLOCK+1, 1, 1); for(int k=0; k<loop; k++) { hipLaunchKernelGGL(( smooth_global), dim3(grid), dim3(block) , 0, 0, b, a, n); } } extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK) { dim3 block(BLOCK, 1, 1); dim3 grid(n/BLOCK+1, 1, 1); for(int k=0; k<loop; k++){ hipLaunchKernelGGL(( smooth_shared), dim3(grid), dim3(block), (BLOCK+2)*sizeof(float) /* dynamic shared-memory size is a byte count */, 0, b, a, n, BLOCK); } }
01d0570d3d4809a85a50c61e91d7455a6f38ccab.cu
#include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> //srand() //#include <stdbool.h> //#define block 514 extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK); extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK); //(2) device kernel (global-memory version). __global__ void smooth_global(float* b, float* a, int n){ int k = blockIdx.x*blockDim.x+threadIdx.x; if(k==0){ b[k]=(2*a[0]+a[1])*0.25; } else if(k==n-1){ b[k]=(a[n-2]+2*a[n-1])*0.25; } else if(k<n){ b[k]=(a[k-1]+2*a[k]+a[k+1])*0.25; } } //(3) device kernel (shared-memory version). __global__ void smooth_shared(float* b, float* a, int n, int BLOCK){ int base = blockIdx.x*blockDim.x; int t = threadIdx.x; //__shared__ float s[BLOCK+2];//declare shared memory. extern __shared__ float s[];//declare shared memory. //load the main data s[1]~s[BLOCK] // s[0] <-- a[base-1] (left boundary) // s[1] <-- a[base] // s[2] <-- a[base+1] // s[3] <-- a[base+2] // ... // s[BLOCK] <-- a[base+BLOCK-1] // s[BLOCK+1] <-- a[base+BLOCK] (right boundary) if(base+t<n){ s[t+1]=a[base+t]; } if(t==0){ //left boundary. if(base==0){ s[0]=0; } else{ s[0]=a[base-1]; //load the boundary data s[0] & s[BLOCK+1] (handled by just two threads) } } if(t==32){ //*** use a separate warp to make the branch faster *** if(base+BLOCK>=n){ //right boundary. s[n-base+1]=0; } else{ s[BLOCK+1] = a[base+BLOCK]; } } __syncthreads(); //synchronize (make sure shared memory has been written) if(base+t<n){ b[base+t]=(s[t]+2*s[t+1]+s[t+2])*0.25; //output the three-point weighted average } }; extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK) { dim3 block(BLOCK, 1, 1); dim3 grid(n/BLOCK+1, 1, 1); for(int k=0; k<loop; k++) { smooth_global<<< grid, block >>>(b, a, n); } } extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK) { dim3 block(BLOCK, 1, 1); dim3 grid(n/BLOCK+1, 1, 1); for(int k=0; k<loop; k++){ smooth_shared<<< grid, block, (BLOCK+2)*sizeof(float) /* dynamic shared-memory size is a byte count */ >>>(b, a, n, BLOCK); } }
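Both wrappers above are plain extern "C" entry points that expect device pointers, so a host caller only has to allocate and fill the arrays on the GPU before calling them. A minimal caller sketch under that assumption, linked against the file above; the array size, fill values, and BLOCK choice are made up.

#include <cstdio>
#include <cuda_runtime.h>

// Prototypes of the wrappers defined above.
extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK);
extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK);

int main()
{
    const int n = 1 << 20;          // number of samples (made up)
    const int BLOCK = 256;          // threads per block (made up)
    float *h_a = new float[n], *h_b = new float[n];
    for (int i = 0; i < n; ++i) h_a[i] = (float)(i % 7);

    float *d_a, *d_b;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);

    smooth_global_outer(d_b, d_a, n, 1, BLOCK);   // global-memory version
    smooth_shared_outer(d_b, d_a, n, 1, BLOCK);   // shared-memory version
    cudaDeviceSynchronize();

    cudaMemcpy(h_b, d_b, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("b[1] = %f (expected (a[0]+2*a[1]+a[2])/4)\n", h_b[1]);

    cudaFree(d_a); cudaFree(d_b);
    delete[] h_a; delete[] h_b;
    return 0;
}

Note that the third launch argument of the smooth_shared call is a byte count, which is why it is (BLOCK+2)*sizeof(float) rather than BLOCK+2.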
ae68202a9b3fe1ad4aa65ba808064386b20d0bfe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <string> #include "conv.cuh" using namespace conv; using namespace std; typedef unsigned char* arrayChn; __global__ void ApplyMask(arrayChn old_r, arrayChn old_g, arrayChn old_b, int len, int side, int* kernel, int knl_size, int knl_sum, arrayChn red, arrayChn green, arrayChn blue, int NumImg) { int n = blockIdx.x; if (n < NumImg) { // Apply kernel mask on the 1-D image representation. Each 'old_channel' are the original image pixels to // be modified by the conv tecnique. 'color' arrays store the result of each application. // REMEMBER TO JUMP THE FIRST LINES AND COLLUNS AFTER EACH ITERATION (no padding used) int offset = n * len + side + 1; // jump the first line and collun int fin = offset + len - 1; // avoid the last collun for(int i = offset; i < fin; ++i) { short tmp_r = (short) ( (old_r[i - side - 1] * kernel[0]) + (old_r[i - side] * kernel[1]) + (old_r[i - side + 1] * kernel[2]) + (old_r[i - 1] * kernel[3]) + (old_r[i] * kernel[4]) + (old_r[i + 1] * kernel[5]) + (old_r[i + side - 1] * kernel[6]) + (old_r[i + side] * kernel[7]) + (old_r[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_r > 255) red[i] = (unsigned char) 255; else if (tmp_r < 0) red[i] = (unsigned char) 0; else red[i] = (unsigned char) tmp_r; short tmp_g = (short) ( (old_g[i - side - 1] * kernel[0]) + (old_g[i - side] * kernel[1]) + (old_g[i - side + 1] * kernel[2]) + (old_g[i - 1] * kernel[3]) + (old_g[i] * kernel[4]) + (old_g[i + 1] * kernel[5]) + (old_g[i + side - 1] * kernel[6]) + (old_g[i + side] * kernel[7]) + (old_g[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_g > 255) green[i] = (unsigned char) 255; else if (tmp_g < 0) green[i] = (unsigned char) 0; else green[i] = (unsigned char) tmp_g; short tmp_b = (short) ( (old_b[i - side - 1] * kernel[0]) + (old_b[i - side] * kernel[1]) + (old_b[i - side + 1] * kernel[2]) + (old_b[i - 1] * kernel[3]) + (old_b[i] * kernel[4]) + (old_b[i + 1] * kernel[5]) + (old_b[i + side - 1] * kernel[6]) + (old_b[i + side] * kernel[7]) + (old_b[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_b > 255) blue[i] = (unsigned char) 255; else if (tmp_b < 0) blue[i] = (unsigned char) 0; else blue[i] = (unsigned char) tmp_b; } } } void generateImage(string filename, arrayChn red, arrayChn green, arrayChn blue, int offset, int len, int side) { unsigned char *output_data = new unsigned char[3 * len]; int k = 0; for (int i = offset; i < offset + len; ++i) { output_data[k] = (unsigned char) red[i]; output_data[k+1] = (unsigned char) green[i]; output_data[k+2] = (unsigned char) blue[i]; k += 3; } if (!tje_encode_to_file(filename.c_str(), side, side, 3, output_data)) throw(t_ecp); } int main(int argc, char **argv) { if (argc < 2) { cout << "Execute with the number of images to be filtered" << endl; return 0; } int NumImg = atoi(argv[1]); vector<Image2D*> list_imgs; Kernel *k = new Kernel("kernel.txt"); int *knl_host = k->getLinear(); int knl_size = k->getLinearSize(); int knl_sum = k->getSum(); // cout << "kernel: " << endl; // for (int i = 0; i < size_knl; ++i) { // cout << knl_host[i] << " "; // } // cout << endl; try { for (int id = 1; id <= NumImg; ++id) list_imgs.push_back(new Image2D("../../img/simulation/"+to_string(id)+".jpg")); } catch (exception &e) { cout << e.what() << std::endl; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // all images must have the same resolution int size_x = list_imgs[0]->getWidth(); int size_y = 
list_imgs[0]->getHeight(); int len = size_x * size_y; int *knl_device; unsigned char *red_device, *red_device_copy, *red_host; unsigned char *green_device, *green_device_copy, *green_host; unsigned char *blue_device, *blue_device_copy, *blue_host; hipMalloc((void**) &knl_device, knl_size * sizeof(int)); hipMalloc((void**) &red_device, (NumImg * len) * sizeof(unsigned char)); hipMalloc((void**) &green_device, (NumImg * len) * sizeof(unsigned char)); hipMalloc((void**) &blue_device, (NumImg * len) * sizeof(unsigned char)); hipMalloc((void**) &red_device_copy, (NumImg * len) * sizeof(unsigned char)); hipMalloc((void**) &green_device_copy, (NumImg * len) * sizeof(unsigned char)); hipMalloc((void**) &blue_device_copy, (NumImg * len) * sizeof(unsigned char)); red_host = new unsigned char[NumImg * len]; green_host = new unsigned char[NumImg * len]; blue_host = new unsigned char[NumImg * len]; int c = 0; for (int id = 0; id < NumImg; ++id) { unsigned char** red = (unsigned char**) list_imgs[id]->getRed(); unsigned char** green = (unsigned char**) list_imgs[id]->getGreen(); unsigned char** blue = (unsigned char**) list_imgs[id]->getBlue(); for (int i = 0; i < size_x; ++i) { for (int j = 0; j < size_y; ++j) { red_host[c] = red[j][i]; green_host[c] = green[j][i]; blue_host[c] = blue[j][i]; c++; } } } // cout << endl << "debug green: " << endl; // for (int i = 0; i < NumImg*size_x*size_y; i++) { // cout << green_host[i] << " "; // } // cout << endl; hipMemcpy(knl_device, knl_host, (knl_size) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(red_device, red_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(green_device, green_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(blue_device, blue_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(red_device_copy, red_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(green_device_copy, green_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemcpy(blue_device_copy, blue_host, (NumImg * len) * sizeof(unsigned char), hipMemcpyHostToDevice); hipEventRecord(start); hipLaunchKernelGGL(( ApplyMask), dim3(NumImg), dim3(1), 0, 0, red_device_copy, green_device_copy, blue_device_copy, len, size_x, knl_device, knl_size, knl_sum, red_device, green_device, blue_device, NumImg); hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << "Elapsed time running " << NumImg << " images: " << milliseconds << "ms" << endl; hipMemcpy(red_host, red_device, (NumImg * len) * sizeof(unsigned char), hipMemcpyDeviceToHost); hipMemcpy(green_host, green_device, (NumImg * len) * sizeof(unsigned char), hipMemcpyDeviceToHost); hipMemcpy(blue_host, blue_device, (NumImg * len) * sizeof(unsigned char), hipMemcpyDeviceToHost); for (int i = 0; i < NumImg; i++) { generateImage("../../img/simulation/"+to_string(i+1)+"-out.jpg", red_host, green_host, blue_host, i*len, len, size_x); } hipFree(knl_device); hipFree(red_device); hipFree(green_device); hipFree(blue_device); delete[] knl_host; delete[] red_host; delete[] green_host; delete[] blue_host; delete k; for (int i = 0; i < list_imgs.size(); i++) delete list_imgs[i]; return 0; }
ae68202a9b3fe1ad4aa65ba808064386b20d0bfe.cu
#include <iostream> #include <vector> #include <string> #include "conv.cuh" using namespace conv; using namespace std; typedef unsigned char* arrayChn; __global__ void ApplyMask(arrayChn old_r, arrayChn old_g, arrayChn old_b, int len, int side, int* kernel, int knl_size, int knl_sum, arrayChn red, arrayChn green, arrayChn blue, int NumImg) { int n = blockIdx.x; if (n < NumImg) { // Apply kernel mask on the 1-D image representation. Each 'old_channel' are the original image pixels to // be modified by the conv tecnique. 'color' arrays store the result of each application. // REMEMBER TO JUMP THE FIRST LINES AND COLLUNS AFTER EACH ITERATION (no padding used) int offset = n * len + side + 1; // jump the first line and collun int fin = offset + len - 1; // avoid the last collun for(int i = offset; i < fin; ++i) { short tmp_r = (short) ( (old_r[i - side - 1] * kernel[0]) + (old_r[i - side] * kernel[1]) + (old_r[i - side + 1] * kernel[2]) + (old_r[i - 1] * kernel[3]) + (old_r[i] * kernel[4]) + (old_r[i + 1] * kernel[5]) + (old_r[i + side - 1] * kernel[6]) + (old_r[i + side] * kernel[7]) + (old_r[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_r > 255) red[i] = (unsigned char) 255; else if (tmp_r < 0) red[i] = (unsigned char) 0; else red[i] = (unsigned char) tmp_r; short tmp_g = (short) ( (old_g[i - side - 1] * kernel[0]) + (old_g[i - side] * kernel[1]) + (old_g[i - side + 1] * kernel[2]) + (old_g[i - 1] * kernel[3]) + (old_g[i] * kernel[4]) + (old_g[i + 1] * kernel[5]) + (old_g[i + side - 1] * kernel[6]) + (old_g[i + side] * kernel[7]) + (old_g[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_g > 255) green[i] = (unsigned char) 255; else if (tmp_g < 0) green[i] = (unsigned char) 0; else green[i] = (unsigned char) tmp_g; short tmp_b = (short) ( (old_b[i - side - 1] * kernel[0]) + (old_b[i - side] * kernel[1]) + (old_b[i - side + 1] * kernel[2]) + (old_b[i - 1] * kernel[3]) + (old_b[i] * kernel[4]) + (old_b[i + 1] * kernel[5]) + (old_b[i + side - 1] * kernel[6]) + (old_b[i + side] * kernel[7]) + (old_b[i + side + 1] * kernel[8]) ) / knl_sum; if (tmp_b > 255) blue[i] = (unsigned char) 255; else if (tmp_b < 0) blue[i] = (unsigned char) 0; else blue[i] = (unsigned char) tmp_b; } } } void generateImage(string filename, arrayChn red, arrayChn green, arrayChn blue, int offset, int len, int side) { unsigned char *output_data = new unsigned char[3 * len]; int k = 0; for (int i = offset; i < offset + len; ++i) { output_data[k] = (unsigned char) red[i]; output_data[k+1] = (unsigned char) green[i]; output_data[k+2] = (unsigned char) blue[i]; k += 3; } if (!tje_encode_to_file(filename.c_str(), side, side, 3, output_data)) throw(t_ecp); } int main(int argc, char **argv) { if (argc < 2) { cout << "Execute with the number of images to be filtered" << endl; return 0; } int NumImg = atoi(argv[1]); vector<Image2D*> list_imgs; Kernel *k = new Kernel("kernel.txt"); int *knl_host = k->getLinear(); int knl_size = k->getLinearSize(); int knl_sum = k->getSum(); // cout << "kernel: " << endl; // for (int i = 0; i < size_knl; ++i) { // cout << knl_host[i] << " "; // } // cout << endl; try { for (int id = 1; id <= NumImg; ++id) list_imgs.push_back(new Image2D("../../img/simulation/"+to_string(id)+".jpg")); } catch (exception &e) { cout << e.what() << std::endl; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // all images must have the same resolution int size_x = list_imgs[0]->getWidth(); int size_y = list_imgs[0]->getHeight(); int len = size_x * size_y; int *knl_device; unsigned char 
*red_device, *red_device_copy, *red_host; unsigned char *green_device, *green_device_copy, *green_host; unsigned char *blue_device, *blue_device_copy, *blue_host; cudaMalloc((void**) &knl_device, knl_size * sizeof(int)); cudaMalloc((void**) &red_device, (NumImg * len) * sizeof(unsigned char)); cudaMalloc((void**) &green_device, (NumImg * len) * sizeof(unsigned char)); cudaMalloc((void**) &blue_device, (NumImg * len) * sizeof(unsigned char)); cudaMalloc((void**) &red_device_copy, (NumImg * len) * sizeof(unsigned char)); cudaMalloc((void**) &green_device_copy, (NumImg * len) * sizeof(unsigned char)); cudaMalloc((void**) &blue_device_copy, (NumImg * len) * sizeof(unsigned char)); red_host = new unsigned char[NumImg * len]; green_host = new unsigned char[NumImg * len]; blue_host = new unsigned char[NumImg * len]; int c = 0; for (int id = 0; id < NumImg; ++id) { unsigned char** red = (unsigned char**) list_imgs[id]->getRed(); unsigned char** green = (unsigned char**) list_imgs[id]->getGreen(); unsigned char** blue = (unsigned char**) list_imgs[id]->getBlue(); for (int i = 0; i < size_x; ++i) { for (int j = 0; j < size_y; ++j) { red_host[c] = red[j][i]; green_host[c] = green[j][i]; blue_host[c] = blue[j][i]; c++; } } } // cout << endl << "debug green: " << endl; // for (int i = 0; i < NumImg*size_x*size_y; i++) { // cout << green_host[i] << " "; // } // cout << endl; cudaMemcpy(knl_device, knl_host, (knl_size) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(red_device, red_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(green_device, green_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(blue_device, blue_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(red_device_copy, red_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(green_device_copy, green_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemcpy(blue_device_copy, blue_host, (NumImg * len) * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaEventRecord(start); ApplyMask<<<NumImg, 1>>>(red_device_copy, green_device_copy, blue_device_copy, len, size_x, knl_device, knl_size, knl_sum, red_device, green_device, blue_device, NumImg); cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << "Elapsed time running " << NumImg << " images: " << milliseconds << "ms" << endl; cudaMemcpy(red_host, red_device, (NumImg * len) * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy(green_host, green_device, (NumImg * len) * sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaMemcpy(blue_host, blue_device, (NumImg * len) * sizeof(unsigned char), cudaMemcpyDeviceToHost); for (int i = 0; i < NumImg; i++) { generateImage("../../img/simulation/"+to_string(i+1)+"-out.jpg", red_host, green_host, blue_host, i*len, len, size_x); } cudaFree(knl_device); cudaFree(red_device); cudaFree(green_device); cudaFree(blue_device); delete[] knl_host; delete[] red_host; delete[] green_host; delete[] blue_host; delete k; for (int i = 0; i < list_imgs.size(); i++) delete list_imgs[i]; return 0; }
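ApplyMask above launches one single-thread block per image and walks every pixel in a serial loop; the same 3x3 weighted sum can instead be mapped one pixel per thread. A hypothetical single-channel sketch of that alternative layout follows; the kernel name, image size, and mask values are made up, and this is not the code used above.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical per-pixel variant: each thread produces one output pixel of a
// single channel, clamping the weighted sum to the 0..255 range. Border pixels
// are skipped, matching the no-padding convention of ApplyMask.
__global__ void mask3x3_per_pixel(const unsigned char* in, unsigned char* out,
                                  int width, int height,
                                  const int* kernel, int kernel_sum)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x <= 0 || y <= 0 || x >= width - 1 || y >= height - 1) return;

    int acc = 0;
    for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx)
            acc += in[(y + dy) * width + (x + dx)] * kernel[(dy + 1) * 3 + (dx + 1)];
    acc /= kernel_sum;
    out[y * width + x] = (unsigned char)(acc < 0 ? 0 : (acc > 255 ? 255 : acc));
}

int main()
{
    const int w = 64, h = 64;
    const int h_kernel[9] = {1, 2, 1, 2, 4, 2, 1, 2, 1};   // simple blur mask, sum = 16
    unsigned char h_in[w * h];
    for (int i = 0; i < w * h; ++i) h_in[i] = (unsigned char)(i % 256);

    unsigned char *d_in, *d_out;
    int *d_kernel;
    cudaMalloc(&d_in, w * h);
    cudaMalloc(&d_out, w * h);
    cudaMalloc(&d_kernel, 9 * sizeof(int));
    cudaMemcpy(d_in, h_in, w * h, cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, h_kernel, 9 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, w * h);

    dim3 block(16, 16);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    mask3x3_per_pixel<<<grid, block>>>(d_in, d_out, w, h, d_kernel, 16);
    cudaDeviceSynchronize();

    unsigned char h_out[w * h];
    cudaMemcpy(h_out, d_out, w * h, cudaMemcpyDeviceToHost);
    printf("out[1][1] = %d\n", (int)h_out[w + 1]);

    cudaFree(d_in); cudaFree(d_out); cudaFree(d_kernel);
    return 0;
}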
fc49d7244d7aa70598f944588bec868350e90d87.hip
// !!! This is a file automatically generated by hipify!!! #include "modcusp_library.h" int cusp_biCGSTAB_solver::cusp_biCGSTAB_initDevice(indexType devID) { int deviceCount = 0; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); return OPERROR; } if (deviceCount == 0) { printf("There are no available CUDA device(s). Reverting to a CPU Solver\n"); return NODEVICE; } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); if ( devID >= deviceCount) { printf("Device id=$d not found. Maximum id=%d. Reverting to a CPU solver \n", devID, deviceCount); return NODEVICE; } else { hipSetDevice(devID); hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, devID); printf("\nRunning on device %d: \"%s\"\n", devID, deviceProp.name); char msg[256]; SPRINTF(msg, "Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); } } return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_allocDevice(indexType n, indexType m) { N = n; nnz = m; if(N <= 0 or nnz <= N) { printf("The size of the coeffcient matrix is not set correctly, N=%d, NNZ=%d\n.", n, m); return OPERROR; } if(hipMalloc(&cooRowIndADev, nnz*sizeof(indexType)) != hipSuccess)return OPERROR; if(hipMalloc(&cooColIndADev, nnz*sizeof(indexType)) != hipSuccess)return OPERROR; if(hipMalloc(&cooValADev, nnz*sizeof(valueType)) != hipSuccess)return OPERROR; if(hipMalloc(&xDev, N*sizeof(valueType)) != hipSuccess)return OPERROR; if(hipMalloc(&bDev, N*sizeof(valueType)) != hipSuccess)return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_AInds(indexType *rows, indexType *cols) { if(hipMemcpy(cooRowIndADev, rows, nnz*sizeof(cooRowIndADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(cooColIndADev, cols, nnz*sizeof(cooColIndADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_system(valueType *Avals, valueType *xHost, valueType *bHost) { if(hipMemcpy(bDev, bHost, N*sizeof(bDev[0]),hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(xDev, xHost, N*sizeof(xDev[0]),hipMemcpyHostToDevice) != hipSuccess) return OPERROR; if(hipMemcpy(cooValADev, Avals, nnz*sizeof(cooValADev[0]), hipMemcpyHostToDevice) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyD2H_x(valueType *xHost) { if(hipMemcpy(xHost, xDev, N*sizeof(xHost[0]),hipMemcpyDeviceToHost) != hipSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_solveDev_system(valueType relTol, valueType absTol, indexType maxItr) { // Wrap device pointers thrust::device_ptr<indexType> wrapped_cooRowIndADev(cooRowIndADev); thrust::device_ptr<indexType> wrapped_cooColIndADev(cooColIndADev); thrust::device_ptr<valueType> wrapped_cooValADev(cooValADev); thrust::device_ptr<valueType> wrapped_xDev(xDev); thrust::device_ptr<valueType> wrapped_bDev(bDev); // Wrap in cusp array1d deviceIndexArrayView rowInds (wrapped_cooRowIndADev, wrapped_cooRowIndADev + nnz); deviceIndexArrayView colInds 
(wrapped_cooColIndADev, wrapped_cooColIndADev + nnz); deviceValueArrayView values (wrapped_cooValADev, wrapped_cooValADev + nnz); deviceValueArrayView x (wrapped_xDev, wrapped_xDev + N); deviceValueArrayView b (wrapped_bDev, wrapped_bDev + N); // Create coo_matrix_view from the 3 array1d views deviceView A(N, N, nnz, rowInds, colInds, values); // Setup a monitor and solve cusp::monitor<valueType> monitor(b, maxItr, relTol, absTol, false); cusp::precond::diagonal<valueType, devMemorySpace> M(A); cusp::krylov::bicgstab(A, x, b, monitor, M); residuals = monitor.residual_norm(); solverItr = monitor.iteration_count(); return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_getMonitor(valueType &res, indexType &nItr) { nItr = solverItr; res = residuals; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_shutdown() { if(hipFree(cooRowIndADev) != hipSuccess)return OPERROR; if(hipFree(cooColIndADev) != hipSuccess)return OPERROR; if(hipFree(cooValADev) != hipSuccess)return OPERROR; if(hipFree(xDev) != hipSuccess)return OPERROR; if(hipFree(bDev) != hipSuccess)return OPERROR; return OPSUCCESS; } /******************************************************/ //External Interfaces / /******************************************************/ extern "C" void* getInstance_cusp_biCGSTAB_solver() { cusp_biCGSTAB_solver *cusp_biCGSTAB_solver_ = new cusp_biCGSTAB_solver(0,0,0.,0.); return static_cast<void *>(cusp_biCGSTAB_solver_); } extern "C" int cusp_biCGSTAB_initDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType devID) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_initDevice(devID); } extern "C" int cusp_biCGSTAB_allocDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType n, indexType m) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_allocDevice(n,m); } extern "C" int cusp_biCGSTAB_copyH2D_AInds_intrf(void *cusp_biCGSTAB_solver_ptr, indexType *rows, indexType *cols) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_AInds(rows,cols); } extern "C" int cusp_biCGSTAB_copyH2D_system_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *Avals, valueType *xHost, valueType *bHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_system(Avals, xHost, bHost); } extern "C" int cusp_biCGSTAB_copyD2H_x_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *xHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyD2H_x(xHost); } extern "C" int cusp_biCGSTAB_solveDev_system_intrf(void *cusp_biCGSTAB_solver_ptr, valueType relTol, valueType absTol, indexType maxItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_solveDev_system(relTol, absTol, maxItr); } extern "C" int cusp_biCGSTAB_getMonitor_intrf(void *cusp_biCGSTAB_solver_ptr, valueType &residual, indexType &nItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_getMonitor(residual, nItr); } extern "C" int cusp_biCGSTAB_shutdown_intrf(void *cusp_biCGSTAB_solver_ptr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_shutdown(); }
fc49d7244d7aa70598f944588bec868350e90d87.cu
#include "modcusp_library.h" int cusp_biCGSTAB_solver::cusp_biCGSTAB_initDevice(indexType devID) { int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); return OPERROR; } if (deviceCount == 0) { printf("There are no available CUDA device(s). Reverting to a CPU Solver\n"); return NODEVICE; } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); if ( devID >= deviceCount) { printf("Device id=$d not found. Maximum id=%d. Reverting to a CPU solver \n", devID, deviceCount); return NODEVICE; } else { cudaSetDevice(devID); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devID); printf("\nRunning on device %d: \"%s\"\n", devID, deviceProp.name); char msg[256]; SPRINTF(msg, "Total amount of global memory: %.0f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); } } return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_allocDevice(indexType n, indexType m) { N = n; nnz = m; if(N <= 0 or nnz <= N) { printf("The size of the coeffcient matrix is not set correctly, N=%d, NNZ=%d\n.", n, m); return OPERROR; } if(cudaMalloc(&cooRowIndADev, nnz*sizeof(indexType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&cooColIndADev, nnz*sizeof(indexType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&cooValADev, nnz*sizeof(valueType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&xDev, N*sizeof(valueType)) != cudaSuccess)return OPERROR; if(cudaMalloc(&bDev, N*sizeof(valueType)) != cudaSuccess)return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_AInds(indexType *rows, indexType *cols) { if(cudaMemcpy(cooRowIndADev, rows, nnz*sizeof(cooRowIndADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(cooColIndADev, cols, nnz*sizeof(cooColIndADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyH2D_system(valueType *Avals, valueType *xHost, valueType *bHost) { if(cudaMemcpy(bDev, bHost, N*sizeof(bDev[0]),cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(xDev, xHost, N*sizeof(xDev[0]),cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; if(cudaMemcpy(cooValADev, Avals, nnz*sizeof(cooValADev[0]), cudaMemcpyHostToDevice) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_copyD2H_x(valueType *xHost) { if(cudaMemcpy(xHost, xDev, N*sizeof(xHost[0]),cudaMemcpyDeviceToHost) != cudaSuccess) return OPERROR; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_solveDev_system(valueType relTol, valueType absTol, indexType maxItr) { // Wrap device pointers thrust::device_ptr<indexType> wrapped_cooRowIndADev(cooRowIndADev); thrust::device_ptr<indexType> wrapped_cooColIndADev(cooColIndADev); thrust::device_ptr<valueType> wrapped_cooValADev(cooValADev); thrust::device_ptr<valueType> wrapped_xDev(xDev); thrust::device_ptr<valueType> wrapped_bDev(bDev); // Wrap in cusp array1d deviceIndexArrayView rowInds (wrapped_cooRowIndADev, wrapped_cooRowIndADev + nnz); deviceIndexArrayView colInds (wrapped_cooColIndADev, 
wrapped_cooColIndADev + nnz); deviceValueArrayView values (wrapped_cooValADev, wrapped_cooValADev + nnz); deviceValueArrayView x (wrapped_xDev, wrapped_xDev + N); deviceValueArrayView b (wrapped_bDev, wrapped_bDev + N); // Create coo_matrix_view from the 3 array1d views deviceView A(N, N, nnz, rowInds, colInds, values); // Setup a monitor and solve cusp::monitor<valueType> monitor(b, maxItr, relTol, absTol, false); cusp::precond::diagonal<valueType, devMemorySpace> M(A); cusp::krylov::bicgstab(A, x, b, monitor, M); residuals = monitor.residual_norm(); solverItr = monitor.iteration_count(); return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_getMonitor(valueType &res, indexType &nItr) { nItr = solverItr; res = residuals; return OPSUCCESS; } int cusp_biCGSTAB_solver::cusp_biCGSTAB_shutdown() { if(cudaFree(cooRowIndADev) != cudaSuccess)return OPERROR; if(cudaFree(cooColIndADev) != cudaSuccess)return OPERROR; if(cudaFree(cooValADev) != cudaSuccess)return OPERROR; if(cudaFree(xDev) != cudaSuccess)return OPERROR; if(cudaFree(bDev) != cudaSuccess)return OPERROR; return OPSUCCESS; } /******************************************************/ //External Interfaces / /******************************************************/ extern "C" void* getInstance_cusp_biCGSTAB_solver() { cusp_biCGSTAB_solver *cusp_biCGSTAB_solver_ = new cusp_biCGSTAB_solver(0,0,0.,0.); return static_cast<void *>(cusp_biCGSTAB_solver_); } extern "C" int cusp_biCGSTAB_initDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType devID) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_initDevice(devID); } extern "C" int cusp_biCGSTAB_allocDevice_intrf(void *cusp_biCGSTAB_solver_ptr, indexType n, indexType m) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_allocDevice(n,m); } extern "C" int cusp_biCGSTAB_copyH2D_AInds_intrf(void *cusp_biCGSTAB_solver_ptr, indexType *rows, indexType *cols) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_AInds(rows,cols); } extern "C" int cusp_biCGSTAB_copyH2D_system_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *Avals, valueType *xHost, valueType *bHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyH2D_system(Avals, xHost, bHost); } extern "C" int cusp_biCGSTAB_copyD2H_x_intrf(void *cusp_biCGSTAB_solver_ptr,valueType *xHost) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_copyD2H_x(xHost); } extern "C" int cusp_biCGSTAB_solveDev_system_intrf(void *cusp_biCGSTAB_solver_ptr, valueType relTol, valueType absTol, indexType maxItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_solveDev_system(relTol, absTol, maxItr); } extern "C" int cusp_biCGSTAB_getMonitor_intrf(void *cusp_biCGSTAB_solver_ptr, valueType &residual, indexType &nItr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_getMonitor(residual, nItr); } extern "C" int cusp_biCGSTAB_shutdown_intrf(void *cusp_biCGSTAB_solver_ptr) { cusp_biCGSTAB_solver *ptr = static_cast<cusp_biCGSTAB_solver*>(cusp_biCGSTAB_solver_ptr); return ptr->cusp_biCGSTAB_shutdown(); }
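The extern "C" wrappers at the end of the file cover the solver's whole life cycle, so a host-side caller simply walks them in order: create, init device, allocate, copy the COO pattern and values, solve, read x back, query the monitor, shut down. A minimal sketch of that sequence follows, assuming indexType is int and valueType is double and that OPSUCCESS is 0; the real typedefs and return codes live in modcusp_library.h, and the 3x3 test system and tolerances are made up.

#include <cstdio>

// Redeclarations of the extern "C" interfaces defined above, under the
// assumed typedefs indexType = int, valueType = double.
extern "C" void* getInstance_cusp_biCGSTAB_solver();
extern "C" int cusp_biCGSTAB_initDevice_intrf(void*, int devID);
extern "C" int cusp_biCGSTAB_allocDevice_intrf(void*, int n, int m);
extern "C" int cusp_biCGSTAB_copyH2D_AInds_intrf(void*, int* rows, int* cols);
extern "C" int cusp_biCGSTAB_copyH2D_system_intrf(void*, double* Avals, double* x, double* b);
extern "C" int cusp_biCGSTAB_solveDev_system_intrf(void*, double relTol, double absTol, int maxItr);
extern "C" int cusp_biCGSTAB_copyD2H_x_intrf(void*, double* x);
extern "C" int cusp_biCGSTAB_getMonitor_intrf(void*, double& residual, int& nItr);
extern "C" int cusp_biCGSTAB_shutdown_intrf(void*);

int main()
{
    // 3x3 tridiagonal test system in COO form; nnz > N as required by
    // cusp_biCGSTAB_allocDevice.
    int rows[7]    = {0,  0,  1, 1,  1,  2, 2};
    int cols[7]    = {0,  1,  0, 1,  2,  1, 2};
    double vals[7] = {4, -1, -1, 4, -1, -1, 4};
    double b[3] = {1, 2, 3};
    double x[3] = {0, 0, 0};

    void* solver = getInstance_cusp_biCGSTAB_solver();
    if (cusp_biCGSTAB_initDevice_intrf(solver, 0) != 0)   // assume OPSUCCESS == 0
        return 1;
    cusp_biCGSTAB_allocDevice_intrf(solver, 3, 7);        // N = 3, nnz = 7
    cusp_biCGSTAB_copyH2D_AInds_intrf(solver, rows, cols);
    cusp_biCGSTAB_copyH2D_system_intrf(solver, vals, x, b);
    cusp_biCGSTAB_solveDev_system_intrf(solver, 1e-8, 1e-12, 1000);
    cusp_biCGSTAB_copyD2H_x_intrf(solver, x);

    double res; int iters;
    cusp_biCGSTAB_getMonitor_intrf(solver, res, iters);
    printf("x = (%g, %g, %g), residual = %g after %d iterations\n",
           x[0], x[1], x[2], res, iters);
    return cusp_biCGSTAB_shutdown_intrf(solver);
}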
da181059546f3bb7214902d7480e637dc2b14a04.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> //////////////////////////////////////////////////////////////////////////////// #include "jpeglib.h" /** * IMAGE DATA FORMATS: * * The standard input image format is a rectangular array of pixels, with * each pixel having the same number of "component" values (color channels). * Each pixel row is an array of JSAMPLEs (which typically are unsigned chars). * If you are working with color data, then the color values for each pixel * must be adjacent in the row; for example, R,G,B,R,G,B,R,G,B,... for 24-bit * RGB color. */ /* The "frame structure" structure contains an image frame (in RGB or grayscale * formats) for passing around the CS338 projects. */ typedef struct frame_struct { JSAMPLE *image_buffer; /* Points to large array of R,G,B-order/grayscale data * Access directly with: * image_buffer[num_components*pixel + component] */ JSAMPLE **row_pointers; /* Points to an array of pointers to the beginning * of each row in the image buffer. Use to access * the image buffer in a row-wise fashion, with: * row_pointers[row][num_components*pixel + component] */ int image_height; /* Number of rows in image */ int image_width; /* Number of columns in image */ int num_components; /* Number of components (usually RGB=3 or gray=1) */ } frame_struct_t; typedef frame_struct_t *frame_ptr; #define MAXINPUTS 1 #define MAXOUTPUTS 1 frame_ptr input_frames[MAXINPUTS]; /* Pointers to input frames */ frame_ptr output_frames[MAXOUTPUTS]; /* Pointers to output frames */ /* Read/write JPEGs, for program startup & shutdown */ void write_JPEG_file (char * filename, frame_ptr p_info, int quality); frame_ptr read_JPEG_file (char * filename); /* Allocate/deallocate frame buffers, USE AS NECESSARY! */ frame_ptr allocate_frame(int height, int width, int num_components); void destroy_frame(frame_ptr kill_me); /** * write_JPEG_file writes out the contents of an image buffer to a JPEG. * A quality level of 2-100 can be provided (default = 75, high quality = ~95, * low quality = ~25, utter pixellation = 2). Note that unlike read_JPEG_file, * it does not do any memory allocation on the buffer passed to it. */ void write_JPEG_file (char * filename, frame_ptr p_info, int quality) { struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; FILE * outfile; /* target file */ /* Step 1: allocate and initialize JPEG compression object */ cinfo.err = jpeg_std_error(&jerr); jpeg_create_compress(&cinfo); /* Step 2: specify data destination (eg, a file) */ /* Note: steps 2 and 3 can be done in either order. 
*/ if ((outfile = fopen(filename, "wb")) == NULL) { fprintf(stderr, "ERROR: Can't open output file %s\n", filename); exit(1); } jpeg_stdio_dest(&cinfo, outfile); /* Step 3: set parameters for compression */ /* Set basic picture parameters (not optional) */ cinfo.image_width = p_info->image_width; /* image width and height, in pixels */ cinfo.image_height = p_info->image_height; cinfo.input_components = p_info->num_components; /* # of color components per pixel */ if (p_info->num_components == 3) cinfo.in_color_space = JCS_RGB; /* colorspace of input image */ else if (p_info->num_components == 1) cinfo.in_color_space = JCS_GRAYSCALE; else { fprintf(stderr, "ERROR: Non-standard colorspace for compressing!\n"); exit(1); } /* Fill in the defaults for everything else, then override quality */ jpeg_set_defaults(&cinfo); jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */); /* Step 4: Start compressor */ jpeg_start_compress(&cinfo, TRUE); /* Step 5: while (scan lines remain to be written) */ /* jpeg_write_scanlines(...); */ while (cinfo.next_scanline < cinfo.image_height) { (void) jpeg_write_scanlines(&cinfo, &(p_info->row_pointers[cinfo.next_scanline]), 1); } /* Step 6: Finish compression & close output */ jpeg_finish_compress(&cinfo); fclose(outfile); /* Step 7: release JPEG compression object */ jpeg_destroy_compress(&cinfo); } /** * read_JPEG_file reads the contents of a JPEG into an image buffer, which * is automatically allocated after the size of the image is determined. * We want to return a frame struct on success, NULL on error. */ frame_ptr read_JPEG_file (char * filename) { /* This struct contains the JPEG decompression parameters and pointers to * working space (which is allocated as needed by the JPEG library). */ struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE * infile; /* source file */ frame_ptr p_info; /* Output frame information */ /* Step 1: allocate and initialize JPEG decompression object */ cinfo.err = jpeg_std_error(&jerr); jpeg_create_decompress(&cinfo); /* Step 2: open & specify data source (eg, a file) */ if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, "ERROR: Can't open input file %s\n", filename); exit(1); } jpeg_stdio_src(&cinfo, infile); /* Step 3: read file parameters with jpeg_read_header() */ (void) jpeg_read_header(&cinfo, TRUE); /* Step 4: use default parameters for decompression */ /* Step 5: Start decompressor */ (void) jpeg_start_decompress(&cinfo); /* Step X: Create a frame struct & buffers and fill in the blanks */ fprintf(stderr, " Opened %s: height = %d, width = %d, c = %d\n", filename, cinfo.output_height, cinfo.output_width, cinfo.output_components); p_info = allocate_frame(cinfo.output_height, cinfo.output_width, cinfo.output_components); /* Step 6: while (scan lines remain to be read) */ /* jpeg_read_scanlines(...); */ while (cinfo.output_scanline < cinfo.output_height) { (void) jpeg_read_scanlines(&cinfo, &(p_info->row_pointers[cinfo.output_scanline]), 1); } /* Step 7: Finish decompression */ (void) jpeg_finish_decompress(&cinfo); /* Step 8: Release JPEG decompression object & file */ jpeg_destroy_decompress(&cinfo); fclose(infile); return p_info; } /** * allocate/destroy_frame allocate a frame_struct_t and fill in the * blanks appropriately (including allocating the actual frames), and * then destroy them afterwards. 
*/ frame_ptr allocate_frame(int height, int width, int num_components) { int row_stride; /* physical row width in output buffer */ int i; frame_ptr p_info; /* Output frame information */ /* JSAMPLEs per row in output buffer */ row_stride = width * num_components; /* Basic struct and information */ if ((p_info = (frame_struct_t*)malloc(sizeof(frame_struct_t))) == NULL) { fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } p_info->image_height = height; p_info->image_width = width; p_info->num_components = num_components; /* Image array and pointers to rows */ if ((p_info->row_pointers = (JSAMPLE**)malloc(sizeof(JSAMPLE *) * height)) == NULL) { fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } if ((p_info->image_buffer = (JSAMPLE*)malloc(sizeof(JSAMPLE) * row_stride * height)) == NULL){ fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } for (i=0; i < height; i++) p_info->row_pointers[i] = & (p_info->image_buffer[i * row_stride]); /* And send it back! */ return p_info; } void destroy_frame(frame_ptr kill_me) { free(kill_me->image_buffer); free(kill_me->row_pointers); free(kill_me); } ////////////////////////////////////////////////////////////////////////////////// /////////////////// My Code Starts Here ////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////// /** * For double threshold analysis, these are the two thresholds. * Atleast from the input images I've looked on, these have shown good * results. **/ int low_threshold = 30; int high_threshold = 70; /** * For Gaussian kernel, this is mainly due to popularity with * different people who implemented the algorithm. **/ #define SIGMA 1.4 /** * the size of a sobel operator: gradient in x and y direction. **/ #define SOBEL_MASK_SIZE 9 /** * size of a gaussian mask. I am using a 5x5 kernel mask, with sigma = 1.4 * and k = 2. **/ #define GAUSSIAN_MASK_SIZE 25 /** * Global sobel operators-- one in the x direction and the other in the y direction. * The two operators are used to find gradient changes (by convolution) in an image. * they are also used to find gradient magnitude and the angle of a pixel, which is used * for non-maximum suppression to bring out edges. **/ int xGradient[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; int yGradient[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1}; /** * runs a kernel with specification as: * no use of constant memory and shared memory.It takes a frame_ptr * as the output of the edge detection algorithm. The input is found in * input_frames[0]. **/ void runKernel(frame_ptr result); /** * runs a device kernel with either constant memory, shared memory, or both * turned on. To use shared memory and tiling, #define RUN_CONST_SHARED_MEM_KERNEL. * if not defined, the default kernel executed is the one that only uses constant memory * without tiling. **/ void run_const_shared_mem_kernel(frame_ptr result); /** * this runs the sequential version of canny edge detector. No device memory * or compute power is used here. **/ void runSequential(frame_ptr output); /** * Given a k and sigma, it computes a gaussian filter kernel of size * (2k + 1) x (2k + 1) **/ void set_convo_kernel(float *kernel, int k, float sigma); // memcpy error in the definition of the hysteresis analysis using // shared data. #define RUN_CONST_SHARED_MEM_KERNEL /** * For testing the output of each step. That way you can get what it looks * like to have an xGradient Applied only. 
whereas in the other implementations, * this step is combined with magnitude and angle together. * -- To get this, uncomment the define, and set prepareKernelCall() to call the * runKernel() function. **/ //#define RUN_INDIVIDUAL_STEPS /** * Makes sure values match in the two images * @credits: Professor Kelly. **/ void checkResults(frame_ptr f1, frame_ptr f2) { int i, j, k; if(f1->image_height != f2->image_height && f1->image_width != f2->image_width && f1->num_components != f2->num_components){ fprintf(stderr, "Dimensions do not match\n"); exit(1); } for (i=0; i < f1->image_height; i++){ for (j=0; j < f1->image_width; j++){ for (k=0; k < f1->num_components; k++){ JSAMPLE j1 = f1->row_pointers[i][(f1->num_components)*j+k]; JSAMPLE j2 = f2->row_pointers[i][(f2->num_components)*j+k]; if(j1 != j2){ fprintf(stderr, "Values do not match at (%d, %d, %d) \n", i, j, k); fprintf(stderr, "from %d\n", j1); fprintf(stderr, "to %d\n", j2); exit(1); } } } } } /** * converts a colored image (with R, G, B values) into a grayscale * image using the conversion ratios of * newpixel = R*0.2125 + G*0.7154 + B*0.0721 * If the image given is in grayscale already, nothing is done. * * @required src.height == dst.height && src.width == src.height * @param src is the colored image * @param dst is the output of the conversion */ void toGrayScale(frame_ptr src, frame_ptr dst) { if (src->num_components == 1) { // iterates over the entire image and do a 1-1 copy since // input image is already in grayscale. for (int i = 0; i < src->image_height; i++) { for (int j = 0; j < src->image_width; j++) { dst->row_pointers[i][j] = src->row_pointers[i][j]; } } } else { // iterates over the entire image and apply the // conversion ratios to create a grayscale image. for (int i = 0; i < src->image_height; i++) { for (int j = 0; j < src->image_width; j++) { dst->row_pointers[i][j] = src->row_pointers[i][3*j] * 0.2125 + src->row_pointers[i][3*j + 1] * 0.7154 + src->row_pointers[i][3*j + 2] * 0.0721; } } } } /** * This is just a helper method. It calls specific functions that either run * on the device (runKernel and run_const_shared_mem_kernel()) or the host (runSequential). * If the input image is colored, the function converts it to grayscale and then * passes it as input to one of the functions that does edge detection. */ void prepareKernelCall() { // input image frame_ptr from = input_frames[0]; // Allocate frame for kernel to store its results into output_frames[0] = allocate_frame(from->image_height, from->image_width, 1); // do grayscale conversion if the image contains // values for RGB colored image. if (input_frames[0]->num_components > 1) { // allocate a new frame for a grayscale image with height // and width similar to the input image. output_frames[1] = allocate_frame(from->image_height, from->image_width, 1); // convert to grayscale, write it to output_frames[1] toGrayScale(input_frames[0], output_frames[1]); destroy_frame(input_frames[0]); // destroy old frame input_frames[0] = output_frames[1]; // put the new gray frame as input frame_ptr output_frames[1] = NULL; // clear out the output frames. output_frames[0] = input_frames[0]; } // call a simple kernel without constant or shared memory, the sequential implementation, // or a constant memory kernel, or a constant memory with shared memory kernel. // this calls the regular device kernel. To do step by step, #define RUN_INDIVIDUAL STEPS, // comment out the unnecessary kernels, and copy the output of the kernel desired to the // parameter passed in. 
// runKernel(output_frames[0]); // this either runs a shared memory with constant memory kernel or a kernel with only constant // memory. #define RUNS_CONST_SHARED_MEM_KERNEL if you want the kernel with both optimizations. run_const_shared_mem_kernel(output_frames[0]); // this simply runs the sequential version of the program. // runSequential(output_frames[0]); } /***************************************************************************** ********************** SEQUENTIAL CODE BEGINS HERE ************************** *****************************************************************************/ /** * A sequential implementation of a guassian blurring algorithm. it uses a * (2k+1)*(2k+1) gaussian mask to do a convolution on the image and calculate * blurred pixels. * @param from an input image * @param to where the output is write. * @param kernel is the gaussian mask. * @param k is the integer described in the size of the gaussian mask. **/ void seq_gaussianBlur(frame_ptr from, frame_ptr to, float *kernel, int k) { // iterates over the entire image matrix and apply the // gaussian mask over the entire image. for (int row = 0; row < from->image_height; row++) { for (int col = 0; col < from->image_width; col++) { // blurred pixel. int newpixel = 0; // applying convolution with the gaussian mask. for (int i = -1*k; i <= k; i++) { int k_offset = (i+k) * (2*k + 1); for (int j = -1*k; j <= k; j++) { int nrow = row + i; int ncol = col + j; // make sure you are convolving over valid pixels if (nrow >= 0 && ncol >= 0 && nrow < from->image_height && ncol < from->image_width) { newpixel = newpixel + kernel[k_offset + (j+k)] * from->image_buffer[nrow*from->image_width + ncol]; } } } // write the blurred pixel to the output image. to->image_buffer[row*from->image_width + col] = newpixel; } } } /** * applies sobel operators on the input image to generate magnitude matrix * and gradient angle, which are used on the next step to do non-maximum * suppression. * @requires from, magnitude, angle have the same dimensions. * @requires xGradient, and yGradient have a 3x3 size. * @param from is the input image * @param magnitude is the image pointer where pixels gradient magnitude is written to. * @param angle is where gradient direction is written to. * @param xGradient, yGradient are sobel kernels in the x and y directions respectively. **/ void seq_gradientCalculation(frame_ptr from, frame_ptr magnitude, frame_ptr angle, int * xGradient, int *yGradient) { // accumulates gradient in the x and y direction for each pixel int xGrad, yGrad; // iterates over the entire pixels of the image. for (int row = 0; row < from->image_height; row++) { for(int col = 0; col < from->image_width; col++) { // resets the accumulated gradient for each pixel xGrad = 0; yGrad = 0; // convolution of gradient masks with the pixel (row, col) region for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // make sure the neighbor exists before applying convolution. 
if ((nrow >= 0) && (ncol >= 0) && (nrow < from->image_height) && (ncol < from->image_width)) { xGrad = xGrad + (xGradient[(i+1)*3 + (j+1)] * from->image_buffer[nrow*from->image_width + ncol]); yGrad = yGrad + (yGradient[(i+1)*3 + (j+1)] * from->image_buffer[nrow*from->image_width + ncol]); } } } // normalize pixel intensity values that are out of bounds (> 255 or < 0) if (xGrad > 255) xGrad = 255; if (yGrad > 255) yGrad = 255; xGrad = abs(xGrad); yGrad = abs(yGrad); // calculate the magnitude gradient and adds it to the output magnitude // image. int mag = hypot((float) xGrad, (float) yGrad); magnitude->image_buffer[row*from->image_width + col] = mag; // calculates the angle of each pixel, converts them to degrees and // write the result to the angle frame_ptr float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angle->image_buffer[row*from->image_width + col] = angle_degrees; } } } /** * implements non-maximum suppression on the magnitude pixels given the * angle information in the argument. The output of this stage is written to * the output frame_ptr. * @requires same dimension for all input frame_ptrs. **/ void seq_maxSuppression(frame_ptr magnitude, frame_ptr angle_fptr, frame_ptr output) { int height = magnitude->image_height; int width = magnitude->image_width; // iterate over all the pixels in the image and for each pixel (row, col) // do a hysteresis analysis. for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int back_pixel, front_pixel; int pixel = row*width + col; int angle = angle_fptr->image_buffer[pixel]; // chooses the direction of the angle and checks if // the pixel at (row, col) is a local maximum or not. // it is suppressed if it is not a local maximum, otherwise it is kept. if (angle > 0 && angle < 23) { // 0 degree angle back_pixel = (col-1 >= 0) ? magnitude->image_buffer[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude->image_buffer[pixel+1] : 0; } else if (angle >= 23 && angle < 68) { // 45 degree angle back_pixel = ((row+1) < height && (col-1) >= 0) ? magnitude->image_buffer[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magnitude->image_buffer[(row-1)*width + (col+1)] : 0; } else if (angle >= 68 && angle < 113) { // 90 degree angle back_pixel = (row - 1 >= 0) ? magnitude->image_buffer[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magnitude->image_buffer[(row+1)*width + col] : 0; } else if (angle >= 113 && angle < 158) { // 135 degree angle back_pixel = (row-1 >= 0 && col-1 >= 0) ? magnitude->image_buffer[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magnitude->image_buffer[(row+1)*width + (col+1)] : 0; } else { // everything else is around 180 degrees. back_pixel = (col-1 >= 0) ? magnitude->image_buffer[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude->image_buffer[pixel+1] : 0; } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magnitude->image_buffer[pixel] < back_pixel || magnitude->image_buffer[pixel] < front_pixel) { output->image_buffer[pixel] = 0; } else { output->image_buffer[pixel] = magnitude->image_buffer[pixel]; } } } } /** * Combines double threshold analysis with edge tracking to finalize the * edge detection algorithm. * @requires: input, final_output have same dimension. * @param: low_threshold, high_threshold are the two thresholds to consider for threshold * analysis. 
**/ void seq_doubleThresholdAndHysteresis(frame_ptr input, frame_ptr final_output, int low_threshold, int high_threshold) { int width = input->image_width; int height = input->image_height; // double threshold analysis to classify pixels into either // a strong edge or weak edge. // iterates over the entire pixels of the input frame_ptr for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int pixel = row*width + col; // if greater than the threshold, set it as a strong edge // else if between low and high threshold, set it as a weak edge // else suppress it. if (input->image_buffer[pixel] >= high_threshold) { input->image_buffer[pixel] = 255; } else if (input->image_buffer[pixel] < high_threshold && input->image_buffer[pixel] >= low_threshold) { input->image_buffer[pixel] = low_threshold; } else { input->image_buffer[pixel] = 0; } } } // hyteresis analysis to find the relationship between weak and // strong edges. // iterates over the entire pixels in the image. for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int pixel = row*width + col; // hysteresis edge tracking: we look at the neighbors of the weak pixel (row, col) and // if there is a strong neighbor, the pixel becomes strong. if (input->image_buffer[pixel] > 0 && input->image_buffer[pixel] < 255) { // check to see if any of the 8 neighbors of the pixel (row, col) // with weak intensity is a strong edge. // make sure also there is a neighbor in the col-1, col+1, row-1, row+1 directions. if (((col-1 >= 0) && (input->image_buffer[row*width + (col-1)] == 255)) || ((col+1 < width) && (input->image_buffer[row*width + (col+1)] == 255)) || ((row+1 < height) && (input->image_buffer[(row+1)*width + col] == 255)) || ((row+1 < height) && (col-1 >= 0) && input->image_buffer[(row+1)*width + (col-1)] == 255) || ((row+1 < height) && (col+1 < width) && (input->image_buffer[(row+1)*width + (col+1)] == 255)) || ((row-1 >= 0) && (col+1 < width) && (input->image_buffer[(row-1)*width + (col+1)] == 255)) || ((row-1 >= 0) && (col-1 >= 0) && (input->image_buffer[(row-1)*width + (col-1)] == 255)) || ((row-1 >= 0) && (input->image_buffer[(row-1)*width + col] == 255))) { final_output->image_buffer[pixel] = 255; } else { final_output->image_buffer[pixel] = 0; } } else { final_output->image_buffer[pixel] = input->image_buffer[pixel]; } } } } /** * Runs the sequential implementation of the Canny Edge algorithm. * it creates the necessary temporary frame_ptr for each step of the * algorithm and reuse some frame_ptr as is fit. **/ void runSequential(frame_ptr final_output) { printf("\t..... Running Sequential......\n"); // calculates the elapse time for the function. clock_t time_in_milli; time_in_milli = clock(); frame_ptr greyimage = input_frames[0]; frame_ptr blurimage = allocate_frame(greyimage->image_height, greyimage->image_width, 1); frame_ptr magnitude = allocate_frame(greyimage->image_height, greyimage->image_width, 1); // kernel mask for gaussian filter int k = 2; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); // blurs the image to remove noise. seq_gaussianBlur(greyimage, blurimage, kernel, k); // calculate gradient changes in the image to find edges // reuses greyimage frame_ptr to store pixels angles seq_gradientCalculation(blurimage, magnitude, greyimage, xGradient, yGradient); // non-maximum suppression // reusing blurimage frame_ptr as output for the maximum suppression // operation. 
seq_maxSuppression(magnitude, greyimage, blurimage); // hysteresis analysis-- edge tracking to find the relationship between // weak and strong edges. // blurimage refers to maxSuppressed output. seq_doubleThresholdAndHysteresis(blurimage, final_output, low_threshold, high_threshold); time_in_milli = clock() - time_in_milli; double inMilli = (((double) time_in_milli) / CLOCKS_PER_SEC) * 1000; printf("Elapsed Time in Milliseconds: %f\n", inMilli); // kill the frames allocated here destroy_frame(blurimage); destroy_frame(magnitude); } /****************************************************************************** ********************** DEVICE CODE STARTS HERE ******************************* ******************************************************************************/ /** * A cuda implementation of a guassian blurring algorithm. it uses a * (2k+1)*(2k+1) gaussian mask to do a convolution on the image and calculate * blurred pixels. No constant memory or shared memory is used here. * @param from an input image * @param to where the output is write. * @param kernel is the gaussian mask. * @param k is the integer described in the size of the gaussian mask. **/ __global__ void APPLY_GAUSSIAN_BLUR(float *kernel, int k, unsigned char *from, unsigned char *to, int height, int width) { int row, col, newpixel, k_len; newpixel = 0; // the new blurred pixel. k_len = 2*k + 1; // length of the kernel mask. col = threadIdx.x + blockIdx.x * blockDim.x; row = threadIdx.y + blockIdx.y * blockDim.y; // make sure it is a valid pixel. if (col < width && row < height) { for (int i = -1*k; i <= k; i++) { // iterates kernel row int k_offset = (i+k) * k_len; for (int j = -1*k; j <= k; j++) { // iterates kernel col int nrow = row + i; int ncol = col + j; // make sure the neighbor being considered for convolution actually exists. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) { newpixel = newpixel + kernel[k_offset + (j+k)] * from[nrow*width + ncol]; } } } // writes the blurred pixel to the "to" frame_ptr. to[row*width + col] = newpixel; } } /** * Applies a Sobel filter mask (either in the x or y direction) in a convolution over the neighbors of each pixel * handled by each thread. * @requires: from and to have the same dimension specified by height and width. * @requires: sobelKernel is 3x3 **/ __global__ void applySobelOperatorKernel(int *sobelKernel, unsigned char *from, unsigned char *to, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int newPixel = 0; // make sure it is a valid pixel. if (col < width && row < height) { // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // bounds checking. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) { newPixel = newPixel + sobelKernel[(i+1)*3 + (j+1)] * from[nrow*width + ncol]; } } } // normalize the out of bounds pixel values (> 255 or < 0) if (newPixel < 0) { newPixel = abs( newPixel); } if (newPixel > 255) { newPixel = 255; } // write it to the output to[row*width + col] = newPixel; } } /** * Given the gradient matrix in the x and y direction, this function computes the gradient magnitude * and angle which are used for non-maximum suppression. * @requires: Gx, Gy, magnitude, and pixel_angle have the same dimensions, specified by the height, width. 
**/ __global__ void pixelMagnitudeAndAngle(unsigned char *Gx, unsigned char *Gy, unsigned char *magnitude, unsigned char *pixel_angle, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure it is a valid pixel if (col < width && row < height) { int pixel = row*width + col; magnitude[pixel] = hypot((float) Gx[pixel], (float) Gy[pixel]); // gets the angle of the pixel and collapses it to the nearest horizontal, vertical, // or diagonal angle of (0, 45, 90, 135 degrees angles) by converting the angle // in radian to degrees. It also gets the absolute value of the angle to reduce redundancy // of two points on the opposite ends of the same diagonal line. That way we have, pi/2 and -pi/2 // mapping to pi/2, pi and 0 mapping to 0, etc. float arctan = atan2((float) Gy[pixel], (float) Gx[pixel]); float inDegrees = abs(arctan) * (180.0 / M_PI); // collapses the different angles into four categories depending on the // proximity of the angle found to each of the four. if (inDegrees > 0 && inDegrees <= 22.5) { pixel_angle[pixel] = 0; } else if (inDegrees > 22.5 && inDegrees <= 67.5) { pixel_angle[pixel] = 45; } else if (inDegrees > 67.5 && inDegrees <= 112.5) { pixel_angle[pixel] = 90; } else if (inDegrees > 112.5 && inDegrees <= 157.5) { pixel_angle[pixel] = 135; } else { // because we get absolute value, everything else is either 180 or 0 pixel_angle[pixel] = 0; } } } /** * Given the pixel gradient angle information, this function suppresses pixels that * are not local maximum in the direction dictated by their angle. This is the * non-maximum analysis stage. * @requires: all image matrix inputs have the dimension described in height and width. **/ __global__ void nonMaximumSuppression(unsigned char *magnitude, unsigned char *pixel_angle, unsigned char *final_suppression, int height, int width) { int front_pixel, back_pixel, pixel, row, col; col = threadIdx.x + blockIdx.x*blockDim.x; row = threadIdx.y + blockIdx.y*blockDim.y; // make sure it is a valid pixel. if (col < width && row < height) { pixel = row*width + col; // chooses a back and front neighbor based on whether the neighbor // in the direction given by pixel angle exists. if (pixel_angle[pixel] == 0) { back_pixel = (col-1 >= 0) ? magnitude[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude[pixel+1] : 0; } else if (pixel_angle[pixel] == 45) { back_pixel = ((row+1) < height && (col-1) >= 0) ? magnitude[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magnitude[(row-1)*width + (col+1)] : 0; } else if (pixel_angle[pixel] == 90) { back_pixel = (row - 1 >= 0) ? magnitude[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magnitude[(row+1)*width + col] : 0; } else if (pixel_angle[pixel] == 135) { back_pixel = (row-1 >= 0 && col-1 >= 0) ? magnitude[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magnitude[(row+1)*width + (col+1)] : 0; } else { printf("### BAD ANGLE: %d\n", pixel_angle[pixel]); } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magnitude[pixel] < back_pixel || magnitude[pixel] < front_pixel) { final_suppression[pixel] = 0; } else { final_suppression[pixel] = magnitude[pixel]; } } } /** * Given low and high thresholds, this function suppresses or keep pixels based on whether * they are greater the low_threshold or not. It standardizes all strong edges here. 
* @requires: image pixels have the dimension described by height and width. **/ __global__ void thresholdAnalysis(unsigned char *suppressed_pixels, unsigned char *output_pixels, int high_threshold, int low_threshold, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel. if (col < width && row < height) { int pixel = row*width + col; // suppress less than the low threshold. Standardize to strong edge if // greater than high threshold. if (suppressed_pixels[pixel] >= high_threshold) output_pixels[pixel] = 255; else if (suppressed_pixels[pixel] < high_threshold && suppressed_pixels[pixel] >= low_threshold) output_pixels[pixel] = low_threshold; else output_pixels[pixel] = 0; } } /** * Does hysteresis analysis to find relationship between weak edges and strong edges. * the output of this step is the final output of the edge detection algorithm. * @requires: dimension of image matrices are equal to height * width. **/ __global__ void hystEdgeTracking(unsigned char *threshold_pixels, unsigned char *output_pixels, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // checks if its a valid pixel. if (col < width && row < height) { // hysteresis edge tracking: we look at the neighbors of the pixel (row, col) and // if there is a strong neighbor, the pixel becomes strong. int pixel = row*width + col; // check if it is a weak edge or not. if (threshold_pixels[pixel] > 0 && threshold_pixels[pixel] < 255) { int found = 0; // check the neighbors of the weak edge to find if there is // a strong edge around. for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // make sure the neighbor exists. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) if (threshold_pixels[nrow*width + ncol] == 255) { found = 1; i = j = 3; } // declare the weak edge strong if it has a strong neighbor. if (found) output_pixels[pixel] = 255; else output_pixels[pixel] = 0; } } } else { output_pixels[pixel] = threshold_pixels[pixel]; } } } ///////////////////////////////////////////////////////////////////////////////////////// /** * A regular implementation for finding the gradient and angle using sobel operators. * Here instead of doing it one by one as above, we do everything together so that we * the optimized implementation seen in the sequential, constant memory, and shared memory * implementations. * @requires: the same specification as the other implementation above. ***/ __global__ void gradient_calculation(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int *xGradient, int *yGradient, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel if (col < width && row < height) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { // saves the row index here int nrow = row + i; for (int j = -1; j <= 1; j++) { int ncol = col + j; if (ncol >= 0 && nrow >= 0 && ncol < width && nrow < height) { xGrad = xGrad + xGradient[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; yGrad = yGrad + yGradient[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; } } } // saves the magnitude gradient value. 
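      // hypot(x, y) evaluates sqrt(x*x + y*y); the result is narrowed to an
      // unsigned char on store, so magnitudes above 255 are not clamped here.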
magMatrix[row*width + col] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row*width + col] = angle_degrees; } } /** * A generic device non-maximum suppression algorithm. It is used for all three different * implementations. No need for constant memory or shared memory. It is an alternative to * the regular device implementation(nonMaxSuppression) which depends on collapsing angles * to either 0, 45, 90, or 135 degrees. * @requires: the parameters conform to the specification in nonMaxSuppression. **/ __global__ void non_max_suppression(unsigned char *magMatrix, unsigned char *angleMatrix, unsigned char *suppressedMatrix, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel if (col < width && row < height) { int back_pixel, front_pixel; int pixel = row*width + col; int angle = angleMatrix[pixel]; // chooses the direction of the angle and checks if // the pixel at (row, col) is a local maximum or not. // it is suppressed if it is not a local maximum, otherwise it is kept. if (angle > 0 && angle < 23) { // 0 degree angle back_pixel = (col-1 >= 0) ? magMatrix[pixel-1] : 0; front_pixel = (col+1 < width) ? magMatrix[pixel+1] : 0; } else if (angle >= 23 && angle < 68) { // 45 degree angle back_pixel = ((row+1) < height && (col-1) >= 0) ? magMatrix[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magMatrix[(row-1)*width + (col+1)] : 0; } else if (angle >= 68 && angle < 113) { // 90 degree angle back_pixel = (row - 1 >= 0) ? magMatrix[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magMatrix[(row+1)*width + col] : 0; } else if (angle >= 113 && angle < 158) { // 135 degree angle back_pixel = (row-1 >= 0 && col-1 >= 0) ? magMatrix[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magMatrix[(row+1)*width + (col+1)] : 0; } else { // everything else is around 180 degrees. back_pixel = (col-1 >= 0) ? magMatrix[pixel-1] : 0; front_pixel = (col+1 < width) ? magMatrix[pixel+1] : 0; } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magMatrix[pixel] < back_pixel || magMatrix[pixel] < front_pixel) { suppressedMatrix[pixel] = 0; } else { suppressedMatrix[pixel] = magMatrix[pixel]; } } } /***************************************************************************** ********************* WITH CONSTANT MEMORY AND CACHING ********************** *****************************************************************************/ /** * Constant memory for sobel operators mask and gaussian kernel mask. **/ __constant__ int xGradientMask[SOBEL_MASK_SIZE]; __constant__ int yGradientMask[SOBEL_MASK_SIZE]; __constant__ float GaussianMask[GAUSSIAN_MASK_SIZE]; /** * A guassian blur implementation that uses constant memory (GaussianMask) declared above. * It requires that the kernel mask generated for the convolution is copied to the constant * memory. * Everything else happens like the regular device gaussian implementation above. **/ __global__ void const_mem_gaussian_blur(unsigned char *inputMatrix, unsigned char *blurMatrix, int k, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int blurPixel = 0; int kernelLen = 2*k + 1; // make sure it is a valid pixel. 
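  // all threads in a warp read the same GaussianMask element on each loop
  // iteration, so these constant-memory reads are served from the constant
  // cache as a single broadcast.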
if (col < width && row < height) { // do convolution by iterating over all the neighbors // of the pixel (row, col). for (int i = -1*k; i <= k; i++) { int nrow = row + i; int offset = (i+k) * kernelLen; for (int j = -1*k; j <= k; j++) { int ncol = col + j; // make sure the neighbor exists. if (ncol < width && ncol >= 0 && nrow < height && nrow >= 0) { blurPixel = blurPixel + GaussianMask[offset + (j+k)] * inputMatrix[nrow*width + ncol]; } } } // write the pixel output. blurMatrix[row*width + col] = blurPixel; } } /** * This function finds the gradient magnitude and gradient angle of the blurMatrix * by convolving sobelMasks (in the y and x directions) with the blurMatrix. * The gradient in the x and y directions are found in place to avoid using extra space * and computational time. **/ __global__ void const_mem_sobel_filter(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel if (col < width && row < height) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { // saves the row index here int nrow = row + i; for (int j = -1; j <= 1; j++) { int ncol = col + j; if (ncol >= 0 && nrow >= 0 && ncol < width && nrow < height) { xGrad = xGrad + xGradientMask[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; yGrad = yGrad + yGradientMask[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; } } } // saves the magnitude gradient value. magMatrix[row*width + col] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row*width + col] = angle_degrees; } } /**************************************************************************** ********************** TILING AND SHARED MEMORY **************************** ****************************************************************************/ /** * Defines the relationship between tileWidth and BlockWidth for * the use of shared memory and pulling data from global memory. * blockWidth = tileWidth + kernelLength - 1; **/ #define SHARED_MEM_TILE_WIDTH 4 #define SHARED_MEM_FOR_SOBEL (SHARED_MEM_TILE_WIDTH + 2) #define GAUSSIAN_LEN 5 #define SOBEL_LEN 3 #define G_LEN (SHARED_MEM_TILE_WIDTH + GAUSSIAN_LEN - 1); #define S_LEN (SHARED_MEM_FOR_SOBEL + SOBEL_LEN - 1); // for regular blocks-- without using tiling. #define REG_BLOCK_LEN 32 // makes it compatible for CUDA. For some reason, couldnt // assign the immediate #defines __constant__ const int gausLen = G_LEN; __constant__ const int sobelLen = S_LEN; /** * This is an extension over the const_mem_gaussian_blur algorithm described above. * The only addition is the used of shared memory and tiling to locally saved * global data into a shared memory. * @requires: something is copied to the GaussianMask constant memory. * @requires: the relationship between tileWidth and BlockWidth is preserved here. * @requires: dimension of matrices should match the height and width argument. 
**/ __global__ void const_shared_mem_gaussian_blur(unsigned char *inputMatrix, unsigned char *blurMatrix, int k, int tileWidth, int height, int width) { int tx = threadIdx.x; int ty = threadIdx.y; int col_o = blockIdx.x * tileWidth + tx; // index to the output int row_o = blockIdx.y * tileWidth + ty; // index to the output. int kernelLen = 2*k + 1; int col_i = col_o - (kernelLen/2); // where to draw the data for shared memory from. int row_i = row_o - (kernelLen/2); // where to draw the data for shared memory from. int blurPixel = 0; // shared memory per block. __shared__ unsigned char shared_tile[gausLen][gausLen]; // retrieving data from the global memory to the shared tile memory. // makes sure a thread is working on a valid. if ((row_i >= 0) && (col_i >= 0) && (row_i < height) && (col_i < width)) { shared_tile[ty][tx] = inputMatrix[row_i*width + col_i]; } else { // put 0 in the place of invalid pixels. shared_tile[ty][tx] = 0; } __syncthreads(); // make sure the thread is supposed to be doing computations. if (ty < tileWidth && tx < tileWidth) { // convolution happens here by iterating over the neighbors of // the pixel. for (int i = 0; i < kernelLen; i++) for (int j = 0; j < kernelLen; j++) blurPixel = blurPixel + GaussianMask[i*kernelLen + j]*shared_tile[i+ty][j+tx]; // make sure the output indices are valid. if (row_o < height && col_o < width) { blurMatrix[row_o*width + col_o] = blurPixel; } } } /** * This is also an extension of the constant memory sobel filter device kernel above. * In addition to using constant memory, it also uses shared memory to compute the gradient * magnitude and direction from the blurMatrix. * @requires: GaussianMask, and xGradientMask and yGradientMask have the appropriate data copied in. * @requires: the relationship between tileWidth and blockWidth is reserved. * @requires: dimensions match the arguments given. **/ __global__ void const_shared_mem_sobel_filter(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int k, int tileWidth, int height, int width) { int tx = threadIdx.x; int ty = threadIdx.y; // output indices int col_o = blockIdx.x * tileWidth + tx; int row_o = blockIdx.y * tileWidth + ty; // input indices. int col_i = col_o - 1; int row_i = row_o - 1; // shared memory for the block __shared__ unsigned char shared_tile[sobelLen][sobelLen]; // retrieving data from the global memory to the shared tile memory. // makes sure a thread is working on a valid. if ((row_i >= 0) && (col_i >= 0) && (row_i < height) && (col_i < width)) { shared_tile[ty][tx] = blurMatrix[row_i*width + col_i]; } else { // put 0 in the place of invalid pixels. shared_tile[ty][tx] = 0; } __syncthreads(); // make sure indices are within tileWith if (ty < tileWidth && tx < tileWidth) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { xGrad = xGrad + xGradientMask[i*3 + j] * shared_tile[i+ty][j+tx]; yGrad = yGrad + yGradientMask[i*3 + j] * shared_tile[i+ty][j+tx]; } } if (row_o < height && col_o < width) { // saves the magnitude gradient value. magMatrix[row_o*width + col_o] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. 
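      // abs() folds angles in [-pi, 0) onto [0, pi], so opposite gradient
      // directions land in the same bin during non-maximum suppression.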
float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row_o*width + col_o] = angle_degrees; } } } /****************************************************************************** ********************** DEVICE CODE ENDS HERE ********************************* ******************************************************************************/ /** * Computes a Gaussian convolutional kernel for a given size. A gaussian * kernel is given by: * => H_{i,j} = (1/2*pi*std_dev^2)*exp(-[(i-(k+1))^2 + (j-(k+1))^2] / (2*std_dev^2)) * * The result of the operation is written to the integer buffer given. * @param kernel is a 1-dimensional array to contain kernel values. Indexing into * the array is given by "row*kernel_length + col". * @param kernel_length is the height/width of the kernel. For every kernel (height = width). * @param std_dev is the standard deviation of values to consider when averaging * neighboring pixels. * * The idea for normalizing: * https://stackoverflow.com/questions/8204645/implementing-gaussian-blur-how-to-calculate-convolution-matrix-kernel */ void set_convo_kernel(float *kernel, int k, float sigma) { if (k < 1) { printf("For Gaussian kernel, k is undefined: %d\n", k); exit(1); } if (sigma <= 0) { printf("Standard Deviation < 0: %f\n", sigma); exit(1); } //initializes constants of the Gaussian kernel. int kernLen = 2*k + 1; float sigmaSqCons = 2 * sigma * sigma; float sigmaPiCons = 1.0 / (sigmaSqCons * M_PI); int k_inc = k + 1; // iterates and fills rows and columns of the kernel float sum = 0.0; for (int i = 1; i <= kernLen; i++) { int row_offset = (i - 1) * kernLen; for (int j = 1; j <= kernLen; j++) { int index = row_offset + (j - 1); float i_pow = pow(i - k_inc, 2.0); float j_pow = pow(j - k_inc, 2.0); float val = sigmaPiCons * exp(-1 * (i_pow + j_pow) / sigmaSqCons); sum = sum + val; kernel[index] = val; } } // Normalize the kernel for (int x = 0; x < kernLen; ++x) for (int y = 0; y < kernLen; ++y) kernel[x*kernLen + y] /= sum; } /** * Finds the strongest pixel intensity value in an image and sets the upper threshold as * a 0.7 * highest_pixel_intensity. * @requires: pixels_ptr->num_components = 1. * @param pixels_ptr is an array of pixels for an image. **/ int maxPixelIntensity(frame_ptr imgPixels) { int max = 0; for (int i = 0; i < imgPixels->image_height; i++) { for (int j = 0; j < imgPixels->image_width; j++) { if (imgPixels->row_pointers[i][j] > max) max = imgPixels->row_pointers[i][j]; } } return max; } /** * allocates space on the device memory and writes the address to the * memory location to the d_pointer. * @param: d_pointer is the location of the pointer to the memory allocated * on the device * @param: numBytes is the number of bytes to allocate on the device. **/ void setDevMemory(void **d_pointer, int numBytes) { if (hipMalloc(d_pointer, numBytes) != hipSuccess) { printf("### CANT SET DEVICE MEMORY: %p\n", d_pointer); exit(1); } } /** * Copies bytes from one memory location to another. The memory locations * can either be on the device, host, or both. This is just a wrapper function * for the cuda implementation, hipMemcpy(). * @param: dst where the bytes should be copied to. * @param: src the location that contains the bytes. * @param: numBytes is how many bytes there is to copy. * @param: dir indicates whether to copy from device to device, device to host, * host to device, or host to host. 
**/ void cpyMemory(void *dst, void *src, int numBytes, hipMemcpyKind dir) { if (hipMemcpy(dst, src, numBytes, dir) != hipSuccess) { printf("### MEM CPY FAIL : %p -> %p\n", src, dst); exit(1); } } /** * Checks if the last device kernel was successfully executed or not. **/ void checkErrorForLastKernel() { if (hipGetLastError() != hipSuccess) { printf("### Kernel Execution failed ###\n"); exit(1); } } /** * This function allocates the necessary resources for executing a kernel * that uses constant memory and shared memory (using tiling) to do convolution * over image as it processes it for edge detection. * -- xGradient, yGradient are declared as globalconstant memory * -- gaussianMask is also declared as global constant memory * Any thread working on convolution reads mask data from the global constant memory, * participate in tiling and pulling data from global memory, and write to the * global memory. * @param result is the output frame_ptr to write into. ***/ void run_const_shared_mem_kernel(frame_ptr result) { // setting a gaussian convolution kernel as a 5x5. int k = 2; int kernel_len = 2*k + 1; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); /** * timing kernel execution */ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // copies the local gaussian mask into the constant memory declared // globally. if (hipMemcpyToSymbol(GaussianMask, kernel, GAUSSIAN_MASK_SIZE * sizeof(int)) != hipSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } if (hipMemcpyToSymbol(xGradientMask, xGradient, 9 * sizeof(int)) != hipSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } if (hipMemcpyToSymbol(yGradientMask, yGradient, 9 * sizeof(int)) != hipSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } // image matrix information and the device pointers to store // input matrix and output matrix. unsigned char *d_from, *d_to, *d_magMatrix; int height = input_frames[0]->image_height; int width = input_frames[0]->image_width; int size = height * width; // allocate the space for input and output on the device // and copy the host input to the device input. setDevMemory((void**) &d_from, size); setDevMemory((void**) &d_to, size); setDevMemory((void**) &d_magMatrix, size); cpyMemory(d_from, input_frames[0]->image_buffer, size, hipMemcpyHostToDevice); // setting up tileWidth information for block and grid dimensions // of the kernel to execute. int tileWidth = SHARED_MEM_TILE_WIDTH; int tileWidthSobel = SHARED_MEM_FOR_SOBEL; // when doing gaussian filter, a 5x5 int blockWidth = tileWidth + kernel_len - 1; dim3 dimBlock(blockWidth, blockWidth, 1); // when doing sobel filters, a 3x3 kernel int blockWidth_Sobel = tileWidthSobel + 2; dim3 sobelBlock(blockWidth_Sobel, blockWidth_Sobel, 1); // a grid for shared memory, and a regular grid for anything else. dim3 dimGrid((width - 1)/tileWidth + 1, (height - 1)/tileWidth + 1, 1); #ifdef RUN_CONST_SHARED_MEM_KERNEL // the way to do regular gridding when using shared memory. dim3 regGrid((width - 1)/blockWidth + 1, (height - 1)/blockWidth + 1, 1); printf("\t......Running Const_Shared_Mem_Kernel.....\n"); // launching a kernel to perform a guassian blur on the image input. 
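  // illustrative check only (not part of the original pipeline): print the
  // tiled launch configuration so the blockWidth = tileWidth + kernel_len - 1
  // relationship can be verified at runtime.
  printf("tileWidth=%d blockWidth=%d grid=(%u,%u) block=(%u,%u)\n",
         tileWidth, blockWidth, dimGrid.x, dimGrid.y, dimBlock.x, dimBlock.y);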
hipLaunchKernelGGL(( const_shared_mem_gaussian_blur), dim3(dimGrid), dim3(dimBlock), 0, 0, d_from, d_to, k, tileWidth, height, width); checkErrorForLastKernel(); // launching a kernel that performs sobel gradient analysis and // writes the result of gradient magnitude and pixel angle into // the matrices d_magMatrix and d_from, respectively hipLaunchKernelGGL(( const_shared_mem_sobel_filter), dim3(dimGrid), dim3(sobelBlock /*dimBlock*/), 0, 0, d_to, d_magMatrix, d_from, k, tileWidth, height, width); checkErrorForLastKernel(); #else blockWidth = REG_BLOCK_LEN; dim3 regGrid((width - 1)/blockWidth + 1, (height - 1)/blockWidth + 1, 1); dimBlock.x = blockWidth; dimBlock.y = blockWidth; printf("\t.....Running Const_Mem_Kernel.....\n"); // launching a kernel to perform a gaussian blur with constant // memory but without shared memory. hipLaunchKernelGGL(( const_mem_gaussian_blur), dim3(regGrid), dim3(dimBlock), 0, 0, d_from, d_to, k, height, width); checkErrorForLastKernel(); // launching a kernel that performs sobel gradient analysis with constant // memory but without using shared memory. hipLaunchKernelGGL(( const_mem_sobel_filter), dim3(regGrid), dim3(dimBlock), 0, 0, d_to, d_magMatrix, d_from, height, width); #endif // calls the non maximum suppression algorithm for a regular non constant // non-shared memory implementation hipLaunchKernelGGL(( non_max_suppression), dim3(regGrid), dim3(dimBlock), 0, 0, d_magMatrix, d_from, d_to, height, width); checkErrorForLastKernel(); hipLaunchKernelGGL(( thresholdAnalysis), dim3(regGrid), dim3(dimBlock), 0, 0, d_to, d_from, high_threshold, low_threshold, height, width); checkErrorForLastKernel(); // final step. calls the regular hysteresis analysis. hipLaunchKernelGGL(( hystEdgeTracking), dim3(regGrid), dim3(dimBlock), 0, 0, d_from, d_to, height, width); checkErrorForLastKernel(); cpyMemory(result->image_buffer, d_to, size, hipMemcpyDeviceToHost); // synchronizing the start and stop times to get the // elapsed time. hipEventRecord(stop); hipEventSynchronize(stop); float time_inMilli = 0; hipEventElapsedTime(&time_inMilli, start, stop); // prints the elapsed time. printf("Kernel Elapsed Time in ms: %.8f\n", time_inMilli); hipFree(d_from); hipFree(d_magMatrix); hipFree(d_to); } // This sets up GPU device by allocating the required memory and then // calls the kernel on GPU. (You might choose to add/remove arguments.) // It's currently set up to use the global variables and write its // final results into the specified argument. void runKernel(frame_ptr result) { // testing set_convo_kernel int k = 2; int kernel_len = 2*k + 1; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); float total = 0.0; for (int i = 0; i < kernel_len; i++) { for (int j = 0; j < kernel_len; j++) { total = total + kernel[i*kernel_len + j]; printf("%5d", (int) round(159 * kernel[i*kernel_len + j])); } printf("\n"); } printf("Gaussian Total: %.5f\n", total); ///////////////////////////////////////////////////////////// unsigned char *d_from, *d_to, *d_final_to, *d_magnitude, *d_pixel_angle, *d_final_suppression; int height = input_frames[0]->image_height; int width = input_frames[0]->image_width; int size = height * width; printf("\t......Running Regular Kernel......\n"); // cudaEvents to record the elapse time for kernel execution. hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // allocates to and from frame_ptrs and copies the global // frame_ptrs into the device memory. 
Exits when hipMalloc/hipMemcpy fails. setDevMemory((void**) &d_from, size); setDevMemory((void**) &d_to, size); setDevMemory((void**) &d_final_to, size); setDevMemory((void**) &d_magnitude, size); setDevMemory((void**) &d_pixel_angle, size); setDevMemory((void**) &d_final_suppression, size); cpyMemory(d_from, input_frames[0]->image_buffer, size, hipMemcpyHostToDevice); /// allocates space for the kernel weights and copies the /// kernel computed to the device memory. float *d_kernel; int k_numBytes = (kernel_len * kernel_len * sizeof(d_kernel[0])); setDevMemory((void**) &d_kernel, k_numBytes); cpyMemory(d_kernel, kernel, k_numBytes, hipMemcpyHostToDevice); //////////////////////////////////////////////////////////////// // sets the block and grid dimensions. int block_side = REG_BLOCK_LEN; dim3 dimBlock(block_side, block_side, 1); dim3 dimGrid(ceil(width/ (float) block_side), ceil(height/ (float) block_side), 1); // kernel call to blur an image. hipLaunchKernelGGL(( APPLY_GAUSSIAN_BLUR), dim3(dimGrid), dim3(dimBlock), 0, 0, d_kernel, k, d_from, d_to, height, width); checkErrorForLastKernel(); #ifdef RUNNING_INDIVIDUAL_STEPS // copies the result of Gaussian filter into the // from pointer to start the gradient kernel int g_numBytes = 9 * sizeof(xGradient[0]); int *d_sobelKernel; /// allocates space for a sobel kernel and copies one of the gradient kernels. setDevMemory((void**) &d_sobelKernel, g_numBytes); cpyMemory(d_sobelKernel, xGradient, g_numBytes, hipMemcpyHostToDevice); /// copies the result of the first kernel as input to the second kernel. cpyMemory(d_from, d_to, size, hipMemcpyDeviceToDevice); // Kernel call to apply Sobel Operator hipLaunchKernelGGL(( applySobelOperatorKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_sobelKernel, d_from, d_to, height, width); checkErrorForLastKernel(); cpyMemory(d_sobelKernel, yGradient, 9 * sizeof(yGradient[0]), hipMemcpyHostToDevice); // Kernel call to apply Sobel Operator hipLaunchKernelGGL(( applySobelOperatorKernel) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_sobelKernel, d_from, d_final_to, height, width); checkErrorForLastKernel(); // immediately frees gradient space allocated. hipFree(d_sobelKernel); // gradient magnitude and angle analysis hipLaunchKernelGGL(( pixelMagnitudeAndAngle) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_to, d_final_to, d_magnitude, d_pixel_angle, height, width); checkErrorForLastKernel(); // non-maximum suppression analysis. hipLaunchKernelGGL(( nonMaximumSuppression) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_magnitude, d_pixel_angle, d_final_suppression, height, width); checkErrorForLastKernel(); #else // copies the result of Gaussian filter into the // from pointer to start the gradient kernel int g_numBytes = 9 * sizeof(xGradient[0]); int *d_xGradient, *d_yGradient; /// allocates space for the two sobel kernel and copies one of the gradient kernels. setDevMemory((void**) &d_xGradient, g_numBytes); setDevMemory((void**) &d_yGradient, g_numBytes); cpyMemory(d_xGradient, xGradient, g_numBytes, hipMemcpyHostToDevice); cpyMemory(d_yGradient, yGradient, g_numBytes, hipMemcpyHostToDevice); // calculates gradient and angle information in one phase. hipLaunchKernelGGL(( gradient_calculation), dim3(dimGrid), dim3(dimBlock), 0, 0, d_to, d_magnitude, d_pixel_angle, d_xGradient, d_yGradient, height, width); checkErrorForLastKernel(); // non maximum suppression using the angle and magnitude found above. 
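  // d_pixel_angle holds gradient directions in degrees (0-180) produced by
  // gradient_calculation; non_max_suppression bins them into the
  // 0/45/90/135-degree directions.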
hipLaunchKernelGGL(( non_max_suppression), dim3(dimGrid), dim3(dimBlock), 0, 0, d_magnitude, d_pixel_angle, d_final_suppression, height, width); checkErrorForLastKernel(); hipFree(d_xGradient); hipFree(d_yGradient); #endif /// double threshold analysis. hipLaunchKernelGGL(( thresholdAnalysis) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_final_suppression, d_magnitude, high_threshold, low_threshold, height, width); checkErrorForLastKernel(); // hysteresis analysis - edge tracking to find relationship // between weak edges and strong edges. hipLaunchKernelGGL(( hystEdgeTracking) , dim3(dimGrid), dim3(dimBlock), 0, 0, d_magnitude, d_to, height, width); checkErrorForLastKernel(); hipEventRecord(stop); // copies the results from the device memory into the // host output frame_ptr cpyMemory(result->image_buffer, d_to, size, hipMemcpyDeviceToHost); // synchronizing the start and stop times to get the // elapsed time. hipEventSynchronize(stop); float time_inMilli = 0; hipEventElapsedTime(&time_inMilli, start, stop); // prints the elapsed time in ms. printf("Kernel Elapsed Time in ms: %.8f\n", time_inMilli); // frees device resources hipFree(d_from); hipFree(d_to); hipFree(d_kernel); hipFree(d_final_to); hipFree(d_pixel_angle); hipFree(d_magnitude); hipFree(d_final_suppression); } /** * Host main routine */ int main(int argc, char **argv) { if(argc < 3){ fprintf(stderr, "ERROR: Need to specify input file and then output file\n"); exit(1); } input_frames[0] = read_JPEG_file(argv[1]); // Load input file prepareKernelCall(); // Do the actual work including calling CUDA kernel write_JPEG_file(argv[2], output_frames[0], 75); // Write output file //runKernel(NULL); return 0; }
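/*
 * Usage sketch (the binary name below is hypothetical):
 *
 *   ./canny_edge input.jpg output.jpg
 *
 * argv[1] is decoded by read_JPEG_file, prepareKernelCall runs the selected
 * implementation (regular kernel, constant/shared-memory kernel, or the
 * sequential version) into output_frames[0], and the result is written to
 * argv[2] at JPEG quality 75.
 */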
da181059546f3bb7214902d7480e637dc2b14a04.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <math.h> #include <cuda_runtime.h> //////////////////////////////////////////////////////////////////////////////// #include "jpeglib.h" /** * IMAGE DATA FORMATS: * * The standard input image format is a rectangular array of pixels, with * each pixel having the same number of "component" values (color channels). * Each pixel row is an array of JSAMPLEs (which typically are unsigned chars). * If you are working with color data, then the color values for each pixel * must be adjacent in the row; for example, R,G,B,R,G,B,R,G,B,... for 24-bit * RGB color. */ /* The "frame structure" structure contains an image frame (in RGB or grayscale * formats) for passing around the CS338 projects. */ typedef struct frame_struct { JSAMPLE *image_buffer; /* Points to large array of R,G,B-order/grayscale data * Access directly with: * image_buffer[num_components*pixel + component] */ JSAMPLE **row_pointers; /* Points to an array of pointers to the beginning * of each row in the image buffer. Use to access * the image buffer in a row-wise fashion, with: * row_pointers[row][num_components*pixel + component] */ int image_height; /* Number of rows in image */ int image_width; /* Number of columns in image */ int num_components; /* Number of components (usually RGB=3 or gray=1) */ } frame_struct_t; typedef frame_struct_t *frame_ptr; #define MAXINPUTS 1 #define MAXOUTPUTS 1 frame_ptr input_frames[MAXINPUTS]; /* Pointers to input frames */ frame_ptr output_frames[MAXOUTPUTS]; /* Pointers to output frames */ /* Read/write JPEGs, for program startup & shutdown */ void write_JPEG_file (char * filename, frame_ptr p_info, int quality); frame_ptr read_JPEG_file (char * filename); /* Allocate/deallocate frame buffers, USE AS NECESSARY! */ frame_ptr allocate_frame(int height, int width, int num_components); void destroy_frame(frame_ptr kill_me); /** * write_JPEG_file writes out the contents of an image buffer to a JPEG. * A quality level of 2-100 can be provided (default = 75, high quality = ~95, * low quality = ~25, utter pixellation = 2). Note that unlike read_JPEG_file, * it does not do any memory allocation on the buffer passed to it. */ void write_JPEG_file (char * filename, frame_ptr p_info, int quality) { struct jpeg_compress_struct cinfo; struct jpeg_error_mgr jerr; FILE * outfile; /* target file */ /* Step 1: allocate and initialize JPEG compression object */ cinfo.err = jpeg_std_error(&jerr); jpeg_create_compress(&cinfo); /* Step 2: specify data destination (eg, a file) */ /* Note: steps 2 and 3 can be done in either order. 
*/ if ((outfile = fopen(filename, "wb")) == NULL) { fprintf(stderr, "ERROR: Can't open output file %s\n", filename); exit(1); } jpeg_stdio_dest(&cinfo, outfile); /* Step 3: set parameters for compression */ /* Set basic picture parameters (not optional) */ cinfo.image_width = p_info->image_width; /* image width and height, in pixels */ cinfo.image_height = p_info->image_height; cinfo.input_components = p_info->num_components; /* # of color components per pixel */ if (p_info->num_components == 3) cinfo.in_color_space = JCS_RGB; /* colorspace of input image */ else if (p_info->num_components == 1) cinfo.in_color_space = JCS_GRAYSCALE; else { fprintf(stderr, "ERROR: Non-standard colorspace for compressing!\n"); exit(1); } /* Fill in the defaults for everything else, then override quality */ jpeg_set_defaults(&cinfo); jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */); /* Step 4: Start compressor */ jpeg_start_compress(&cinfo, TRUE); /* Step 5: while (scan lines remain to be written) */ /* jpeg_write_scanlines(...); */ while (cinfo.next_scanline < cinfo.image_height) { (void) jpeg_write_scanlines(&cinfo, &(p_info->row_pointers[cinfo.next_scanline]), 1); } /* Step 6: Finish compression & close output */ jpeg_finish_compress(&cinfo); fclose(outfile); /* Step 7: release JPEG compression object */ jpeg_destroy_compress(&cinfo); } /** * read_JPEG_file reads the contents of a JPEG into an image buffer, which * is automatically allocated after the size of the image is determined. * We want to return a frame struct on success, NULL on error. */ frame_ptr read_JPEG_file (char * filename) { /* This struct contains the JPEG decompression parameters and pointers to * working space (which is allocated as needed by the JPEG library). */ struct jpeg_decompress_struct cinfo; struct jpeg_error_mgr jerr; FILE * infile; /* source file */ frame_ptr p_info; /* Output frame information */ /* Step 1: allocate and initialize JPEG decompression object */ cinfo.err = jpeg_std_error(&jerr); jpeg_create_decompress(&cinfo); /* Step 2: open & specify data source (eg, a file) */ if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, "ERROR: Can't open input file %s\n", filename); exit(1); } jpeg_stdio_src(&cinfo, infile); /* Step 3: read file parameters with jpeg_read_header() */ (void) jpeg_read_header(&cinfo, TRUE); /* Step 4: use default parameters for decompression */ /* Step 5: Start decompressor */ (void) jpeg_start_decompress(&cinfo); /* Step X: Create a frame struct & buffers and fill in the blanks */ fprintf(stderr, " Opened %s: height = %d, width = %d, c = %d\n", filename, cinfo.output_height, cinfo.output_width, cinfo.output_components); p_info = allocate_frame(cinfo.output_height, cinfo.output_width, cinfo.output_components); /* Step 6: while (scan lines remain to be read) */ /* jpeg_read_scanlines(...); */ while (cinfo.output_scanline < cinfo.output_height) { (void) jpeg_read_scanlines(&cinfo, &(p_info->row_pointers[cinfo.output_scanline]), 1); } /* Step 7: Finish decompression */ (void) jpeg_finish_decompress(&cinfo); /* Step 8: Release JPEG decompression object & file */ jpeg_destroy_decompress(&cinfo); fclose(infile); return p_info; } /** * allocate/destroy_frame allocate a frame_struct_t and fill in the * blanks appropriately (including allocating the actual frames), and * then destroy them afterwards. 
*/ frame_ptr allocate_frame(int height, int width, int num_components) { int row_stride; /* physical row width in output buffer */ int i; frame_ptr p_info; /* Output frame information */ /* JSAMPLEs per row in output buffer */ row_stride = width * num_components; /* Basic struct and information */ if ((p_info = (frame_struct_t*)malloc(sizeof(frame_struct_t))) == NULL) { fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } p_info->image_height = height; p_info->image_width = width; p_info->num_components = num_components; /* Image array and pointers to rows */ if ((p_info->row_pointers = (JSAMPLE**)malloc(sizeof(JSAMPLE *) * height)) == NULL) { fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } if ((p_info->image_buffer = (JSAMPLE*)malloc(sizeof(JSAMPLE) * row_stride * height)) == NULL){ fprintf(stderr, "ERROR: Memory allocation failure\n"); exit(1); } for (i=0; i < height; i++) p_info->row_pointers[i] = & (p_info->image_buffer[i * row_stride]); /* And send it back! */ return p_info; } void destroy_frame(frame_ptr kill_me) { free(kill_me->image_buffer); free(kill_me->row_pointers); free(kill_me); } ////////////////////////////////////////////////////////////////////////////////// /////////////////// My Code Starts Here ////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////// /** * For double threshold analysis, these are the two thresholds. * Atleast from the input images I've looked on, these have shown good * results. **/ int low_threshold = 30; int high_threshold = 70; /** * For Gaussian kernel, this is mainly due to popularity with * different people who implemented the algorithm. **/ #define SIGMA 1.4 /** * the size of a sobel operator: gradient in x and y direction. **/ #define SOBEL_MASK_SIZE 9 /** * size of a gaussian mask. I am using a 5x5 kernel mask, with sigma = 1.4 * and k = 2. **/ #define GAUSSIAN_MASK_SIZE 25 /** * Global sobel operators-- one in the x direction and the other in the y direction. * The two operators are used to find gradient changes (by convolution) in an image. * they are also used to find gradient magnitude and the angle of a pixel, which is used * for non-maximum suppression to bring out edges. **/ int xGradient[9] = {-1, 0, 1, -2, 0, 2, -1, 0, 1}; int yGradient[9] = {1, 2, 1, 0, 0, 0, -1, -2, -1}; /** * runs a kernel with specification as: * no use of constant memory and shared memory.It takes a frame_ptr * as the output of the edge detection algorithm. The input is found in * input_frames[0]. **/ void runKernel(frame_ptr result); /** * runs a device kernel with either constant memory, shared memory, or both * turned on. To use shared memory and tiling, #define RUN_CONST_SHARED_MEM_KERNEL. * if not defined, the default kernel executed is the one that only uses constant memory * without tiling. **/ void run_const_shared_mem_kernel(frame_ptr result); /** * this runs the sequential version of canny edge detector. No device memory * or compute power is used here. **/ void runSequential(frame_ptr output); /** * Given a k and sigma, it computes a gaussian filter kernel of size * (2k + 1) x (2k + 1) **/ void set_convo_kernel(float *kernel, int k, float sigma); // memcpy error in the definition of the hysteresis analysis using // shared data. #define RUN_CONST_SHARED_MEM_KERNEL /** * For testing the output of each step. That way you can get what it looks * like to have an xGradient Applied only. 
whereas in the other implementations, * this step is combined with magnitude and angle together. * -- To get this, uncomment the define, and set prepareKernelCall() to call the * runKernel() function. **/ //#define RUN_INDIVIDUAL_STEPS /** * Makes sure values match in the two images * @credits: Professor Kelly. **/ void checkResults(frame_ptr f1, frame_ptr f2) { int i, j, k; if(f1->image_height != f2->image_height && f1->image_width != f2->image_width && f1->num_components != f2->num_components){ fprintf(stderr, "Dimensions do not match\n"); exit(1); } for (i=0; i < f1->image_height; i++){ for (j=0; j < f1->image_width; j++){ for (k=0; k < f1->num_components; k++){ JSAMPLE j1 = f1->row_pointers[i][(f1->num_components)*j+k]; JSAMPLE j2 = f2->row_pointers[i][(f2->num_components)*j+k]; if(j1 != j2){ fprintf(stderr, "Values do not match at (%d, %d, %d) \n", i, j, k); fprintf(stderr, "from %d\n", j1); fprintf(stderr, "to %d\n", j2); exit(1); } } } } } /** * converts a colored image (with R, G, B values) into a grayscale * image using the conversion ratios of * newpixel = R*0.2125 + G*0.7154 + B*0.0721 * If the image given is in grayscale already, nothing is done. * * @required src.height == dst.height && src.width == src.height * @param src is the colored image * @param dst is the output of the conversion */ void toGrayScale(frame_ptr src, frame_ptr dst) { if (src->num_components == 1) { // iterates over the entire image and do a 1-1 copy since // input image is already in grayscale. for (int i = 0; i < src->image_height; i++) { for (int j = 0; j < src->image_width; j++) { dst->row_pointers[i][j] = src->row_pointers[i][j]; } } } else { // iterates over the entire image and apply the // conversion ratios to create a grayscale image. for (int i = 0; i < src->image_height; i++) { for (int j = 0; j < src->image_width; j++) { dst->row_pointers[i][j] = src->row_pointers[i][3*j] * 0.2125 + src->row_pointers[i][3*j + 1] * 0.7154 + src->row_pointers[i][3*j + 2] * 0.0721; } } } } /** * This is just a helper method. It calls specific functions that either run * on the device (runKernel and run_const_shared_mem_kernel()) or the host (runSequential). * If the input image is colored, the function converts it to grayscale and then * passes it as input to one of the functions that does edge detection. */ void prepareKernelCall() { // input image frame_ptr from = input_frames[0]; // Allocate frame for kernel to store its results into output_frames[0] = allocate_frame(from->image_height, from->image_width, 1); // do grayscale conversion if the image contains // values for RGB colored image. if (input_frames[0]->num_components > 1) { // allocate a new frame for a grayscale image with height // and width similar to the input image. output_frames[1] = allocate_frame(from->image_height, from->image_width, 1); // convert to grayscale, write it to output_frames[1] toGrayScale(input_frames[0], output_frames[1]); destroy_frame(input_frames[0]); // destroy old frame input_frames[0] = output_frames[1]; // put the new gray frame as input frame_ptr output_frames[1] = NULL; // clear out the output frames. output_frames[0] = input_frames[0]; } // call a simple kernel without constant or shared memory, the sequential implementation, // or a constant memory kernel, or a constant memory with shared memory kernel. // this calls the regular device kernel. To do step by step, #define RUN_INDIVIDUAL STEPS, // comment out the unnecessary kernels, and copy the output of the kernel desired to the // parameter passed in. 
// runKernel(output_frames[0]); // this either runs a shared memory with constant memory kernel or a kernel with only constant // memory. #define RUNS_CONST_SHARED_MEM_KERNEL if you want the kernel with both optimizations. run_const_shared_mem_kernel(output_frames[0]); // this simply runs the sequential version of the program. // runSequential(output_frames[0]); } /***************************************************************************** ********************** SEQUENTIAL CODE BEGINS HERE ************************** *****************************************************************************/ /** * A sequential implementation of a guassian blurring algorithm. it uses a * (2k+1)*(2k+1) gaussian mask to do a convolution on the image and calculate * blurred pixels. * @param from an input image * @param to where the output is write. * @param kernel is the gaussian mask. * @param k is the integer described in the size of the gaussian mask. **/ void seq_gaussianBlur(frame_ptr from, frame_ptr to, float *kernel, int k) { // iterates over the entire image matrix and apply the // gaussian mask over the entire image. for (int row = 0; row < from->image_height; row++) { for (int col = 0; col < from->image_width; col++) { // blurred pixel. int newpixel = 0; // applying convolution with the gaussian mask. for (int i = -1*k; i <= k; i++) { int k_offset = (i+k) * (2*k + 1); for (int j = -1*k; j <= k; j++) { int nrow = row + i; int ncol = col + j; // make sure you are convolving over valid pixels if (nrow >= 0 && ncol >= 0 && nrow < from->image_height && ncol < from->image_width) { newpixel = newpixel + kernel[k_offset + (j+k)] * from->image_buffer[nrow*from->image_width + ncol]; } } } // write the blurred pixel to the output image. to->image_buffer[row*from->image_width + col] = newpixel; } } } /** * applies sobel operators on the input image to generate magnitude matrix * and gradient angle, which are used on the next step to do non-maximum * suppression. * @requires from, magnitude, angle have the same dimensions. * @requires xGradient, and yGradient have a 3x3 size. * @param from is the input image * @param magnitude is the image pointer where pixels gradient magnitude is written to. * @param angle is where gradient direction is written to. * @param xGradient, yGradient are sobel kernels in the x and y directions respectively. **/ void seq_gradientCalculation(frame_ptr from, frame_ptr magnitude, frame_ptr angle, int * xGradient, int *yGradient) { // accumulates gradient in the x and y direction for each pixel int xGrad, yGrad; // iterates over the entire pixels of the image. for (int row = 0; row < from->image_height; row++) { for(int col = 0; col < from->image_width; col++) { // resets the accumulated gradient for each pixel xGrad = 0; yGrad = 0; // convolution of gradient masks with the pixel (row, col) region for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // make sure the neighbor exists before applying convolution. 
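          // neighbors that fall outside the image are skipped, which is
          // equivalent to zero-padding the border for the 3x3 Sobel masks.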
if ((nrow >= 0) && (ncol >= 0) && (nrow < from->image_height) && (ncol < from->image_width)) { xGrad = xGrad + (xGradient[(i+1)*3 + (j+1)] * from->image_buffer[nrow*from->image_width + ncol]); yGrad = yGrad + (yGradient[(i+1)*3 + (j+1)] * from->image_buffer[nrow*from->image_width + ncol]); } } } // normalize pixel intensity values that are out of bounds (> 255 or < 0) if (xGrad > 255) xGrad = 255; if (yGrad > 255) yGrad = 255; xGrad = abs(xGrad); yGrad = abs(yGrad); // calculate the magnitude gradient and adds it to the output magnitude // image. int mag = hypot((float) xGrad, (float) yGrad); magnitude->image_buffer[row*from->image_width + col] = mag; // calculates the angle of each pixel, converts them to degrees and // write the result to the angle frame_ptr float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angle->image_buffer[row*from->image_width + col] = angle_degrees; } } } /** * implements non-maximum suppression on the magnitude pixels given the * angle information in the argument. The output of this stage is written to * the output frame_ptr. * @requires same dimension for all input frame_ptrs. **/ void seq_maxSuppression(frame_ptr magnitude, frame_ptr angle_fptr, frame_ptr output) { int height = magnitude->image_height; int width = magnitude->image_width; // iterate over all the pixels in the image and for each pixel (row, col) // do a hysteresis analysis. for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int back_pixel, front_pixel; int pixel = row*width + col; int angle = angle_fptr->image_buffer[pixel]; // chooses the direction of the angle and checks if // the pixel at (row, col) is a local maximum or not. // it is suppressed if it is not a local maximum, otherwise it is kept. if (angle > 0 && angle < 23) { // 0 degree angle back_pixel = (col-1 >= 0) ? magnitude->image_buffer[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude->image_buffer[pixel+1] : 0; } else if (angle >= 23 && angle < 68) { // 45 degree angle back_pixel = ((row+1) < height && (col-1) >= 0) ? magnitude->image_buffer[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magnitude->image_buffer[(row-1)*width + (col+1)] : 0; } else if (angle >= 68 && angle < 113) { // 90 degree angle back_pixel = (row - 1 >= 0) ? magnitude->image_buffer[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magnitude->image_buffer[(row+1)*width + col] : 0; } else if (angle >= 113 && angle < 158) { // 135 degree angle back_pixel = (row-1 >= 0 && col-1 >= 0) ? magnitude->image_buffer[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magnitude->image_buffer[(row+1)*width + (col+1)] : 0; } else { // everything else is around 180 degrees. back_pixel = (col-1 >= 0) ? magnitude->image_buffer[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude->image_buffer[pixel+1] : 0; } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magnitude->image_buffer[pixel] < back_pixel || magnitude->image_buffer[pixel] < front_pixel) { output->image_buffer[pixel] = 0; } else { output->image_buffer[pixel] = magnitude->image_buffer[pixel]; } } } } /** * Combines double threshold analysis with edge tracking to finalize the * edge detection algorithm. * @requires: input, final_output have same dimension. * @param: low_threshold, high_threshold are the two thresholds to consider for threshold * analysis. 
**/ void seq_doubleThresholdAndHysteresis(frame_ptr input, frame_ptr final_output, int low_threshold, int high_threshold) { int width = input->image_width; int height = input->image_height; // double threshold analysis to classify pixels into either // a strong edge or weak edge. // iterates over the entire pixels of the input frame_ptr for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int pixel = row*width + col; // if greater than the threshold, set it as a strong edge // else if between low and high threshold, set it as a weak edge // else suppress it. if (input->image_buffer[pixel] >= high_threshold) { input->image_buffer[pixel] = 255; } else if (input->image_buffer[pixel] < high_threshold && input->image_buffer[pixel] >= low_threshold) { input->image_buffer[pixel] = low_threshold; } else { input->image_buffer[pixel] = 0; } } } // hyteresis analysis to find the relationship between weak and // strong edges. // iterates over the entire pixels in the image. for (int row = 0; row < height; row++) { for (int col = 0; col < width; col++) { int pixel = row*width + col; // hysteresis edge tracking: we look at the neighbors of the weak pixel (row, col) and // if there is a strong neighbor, the pixel becomes strong. if (input->image_buffer[pixel] > 0 && input->image_buffer[pixel] < 255) { // check to see if any of the 8 neighbors of the pixel (row, col) // with weak intensity is a strong edge. // make sure also there is a neighbor in the col-1, col+1, row-1, row+1 directions. if (((col-1 >= 0) && (input->image_buffer[row*width + (col-1)] == 255)) || ((col+1 < width) && (input->image_buffer[row*width + (col+1)] == 255)) || ((row+1 < height) && (input->image_buffer[(row+1)*width + col] == 255)) || ((row+1 < height) && (col-1 >= 0) && input->image_buffer[(row+1)*width + (col-1)] == 255) || ((row+1 < height) && (col+1 < width) && (input->image_buffer[(row+1)*width + (col+1)] == 255)) || ((row-1 >= 0) && (col+1 < width) && (input->image_buffer[(row-1)*width + (col+1)] == 255)) || ((row-1 >= 0) && (col-1 >= 0) && (input->image_buffer[(row-1)*width + (col-1)] == 255)) || ((row-1 >= 0) && (input->image_buffer[(row-1)*width + col] == 255))) { final_output->image_buffer[pixel] = 255; } else { final_output->image_buffer[pixel] = 0; } } else { final_output->image_buffer[pixel] = input->image_buffer[pixel]; } } } } /** * Runs the sequential implementation of the Canny Edge algorithm. * it creates the necessary temporary frame_ptr for each step of the * algorithm and reuse some frame_ptr as is fit. **/ void runSequential(frame_ptr final_output) { printf("\t..... Running Sequential......\n"); // calculates the elapse time for the function. clock_t time_in_milli; time_in_milli = clock(); frame_ptr greyimage = input_frames[0]; frame_ptr blurimage = allocate_frame(greyimage->image_height, greyimage->image_width, 1); frame_ptr magnitude = allocate_frame(greyimage->image_height, greyimage->image_width, 1); // kernel mask for gaussian filter int k = 2; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); // blurs the image to remove noise. seq_gaussianBlur(greyimage, blurimage, kernel, k); // calculate gradient changes in the image to find edges // reuses greyimage frame_ptr to store pixels angles seq_gradientCalculation(blurimage, magnitude, greyimage, xGradient, yGradient); // non-maximum suppression // reusing blurimage frame_ptr as output for the maximum suppression // operation. 
seq_maxSuppression(magnitude, greyimage, blurimage); // hysteresis analysis-- edge tracking to find the relationship between // weak and strong edges. // blurimage refers to maxSuppressed output. seq_doubleThresholdAndHysteresis(blurimage, final_output, low_threshold, high_threshold); time_in_milli = clock() - time_in_milli; double inMilli = (((double) time_in_milli) / CLOCKS_PER_SEC) * 1000; printf("Elapsed Time in Milliseconds: %f\n", inMilli); // kill the frames allocated here destroy_frame(blurimage); destroy_frame(magnitude); } /****************************************************************************** ********************** DEVICE CODE STARTS HERE ******************************* ******************************************************************************/ /** * A cuda implementation of a guassian blurring algorithm. it uses a * (2k+1)*(2k+1) gaussian mask to do a convolution on the image and calculate * blurred pixels. No constant memory or shared memory is used here. * @param from an input image * @param to where the output is write. * @param kernel is the gaussian mask. * @param k is the integer described in the size of the gaussian mask. **/ __global__ void APPLY_GAUSSIAN_BLUR(float *kernel, int k, unsigned char *from, unsigned char *to, int height, int width) { int row, col, newpixel, k_len; newpixel = 0; // the new blurred pixel. k_len = 2*k + 1; // length of the kernel mask. col = threadIdx.x + blockIdx.x * blockDim.x; row = threadIdx.y + blockIdx.y * blockDim.y; // make sure it is a valid pixel. if (col < width && row < height) { for (int i = -1*k; i <= k; i++) { // iterates kernel row int k_offset = (i+k) * k_len; for (int j = -1*k; j <= k; j++) { // iterates kernel col int nrow = row + i; int ncol = col + j; // make sure the neighbor being considered for convolution actually exists. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) { newpixel = newpixel + kernel[k_offset + (j+k)] * from[nrow*width + ncol]; } } } // writes the blurred pixel to the "to" frame_ptr. to[row*width + col] = newpixel; } } /** * Applies a Sobel filter mask (either in the x or y direction) in a convolution over the neighbors of each pixel * handled by each thread. * @requires: from and to have the same dimension specified by height and width. * @requires: sobelKernel is 3x3 **/ __global__ void applySobelOperatorKernel(int *sobelKernel, unsigned char *from, unsigned char *to, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int newPixel = 0; // make sure it is a valid pixel. if (col < width && row < height) { // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // bounds checking. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) { newPixel = newPixel + sobelKernel[(i+1)*3 + (j+1)] * from[nrow*width + ncol]; } } } // normalize the out of bounds pixel values (> 255 or < 0) if (newPixel < 0) { newPixel = abs( newPixel); } if (newPixel > 255) { newPixel = 255; } // write it to the output to[row*width + col] = newPixel; } } /** * Given the gradient matrix in the x and y direction, this function computes the gradient magnitude * and angle which are used for non-maximum suppression. * @requires: Gx, Gy, magnitude, and pixel_angle have the same dimensions, specified by the height, width. 
**/ __global__ void pixelMagnitudeAndAngle(unsigned char *Gx, unsigned char *Gy, unsigned char *magnitude, unsigned char *pixel_angle, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure it is a valid pixel if (col < width && row < height) { int pixel = row*width + col; magnitude[pixel] = hypot((float) Gx[pixel], (float) Gy[pixel]); // gets the angle of the pixel and collapses it to the nearest horizontal, vertical, // or diagonal angle of (0, 45, 90, 135 degrees angles) by converting the angle // in radian to degrees. It also gets the absolute value of the angle to reduce redundancy // of two points on the opposite ends of the same diagonal line. That way we have, pi/2 and -pi/2 // mapping to pi/2, pi and 0 mapping to 0, etc. float arctan = atan2((float) Gy[pixel], (float) Gx[pixel]); float inDegrees = abs(arctan) * (180.0 / M_PI); // collapses the different angles into four categories depending on the // proximity of the angle found to each of the four. if (inDegrees > 0 && inDegrees <= 22.5) { pixel_angle[pixel] = 0; } else if (inDegrees > 22.5 && inDegrees <= 67.5) { pixel_angle[pixel] = 45; } else if (inDegrees > 67.5 && inDegrees <= 112.5) { pixel_angle[pixel] = 90; } else if (inDegrees > 112.5 && inDegrees <= 157.5) { pixel_angle[pixel] = 135; } else { // because we get absolute value, everything else is either 180 or 0 pixel_angle[pixel] = 0; } } } /** * Given the pixel gradient angle information, this function suppresses pixels that * are not local maximum in the direction dictated by their angle. This is the * non-maximum analysis stage. * @requires: all image matrix inputs have the dimension described in height and width. **/ __global__ void nonMaximumSuppression(unsigned char *magnitude, unsigned char *pixel_angle, unsigned char *final_suppression, int height, int width) { int front_pixel, back_pixel, pixel, row, col; col = threadIdx.x + blockIdx.x*blockDim.x; row = threadIdx.y + blockIdx.y*blockDim.y; // make sure it is a valid pixel. if (col < width && row < height) { pixel = row*width + col; // chooses a back and front neighbor based on whether the neighbor // in the direction given by pixel angle exists. if (pixel_angle[pixel] == 0) { back_pixel = (col-1 >= 0) ? magnitude[pixel-1] : 0; front_pixel = (col+1 < width) ? magnitude[pixel+1] : 0; } else if (pixel_angle[pixel] == 45) { back_pixel = ((row+1) < height && (col-1) >= 0) ? magnitude[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magnitude[(row-1)*width + (col+1)] : 0; } else if (pixel_angle[pixel] == 90) { back_pixel = (row - 1 >= 0) ? magnitude[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magnitude[(row+1)*width + col] : 0; } else if (pixel_angle[pixel] == 135) { back_pixel = (row-1 >= 0 && col-1 >= 0) ? magnitude[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magnitude[(row+1)*width + (col+1)] : 0; } else { printf("### BAD ANGLE: %d\n", pixel_angle[pixel]); } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magnitude[pixel] < back_pixel || magnitude[pixel] < front_pixel) { final_suppression[pixel] = 0; } else { final_suppression[pixel] = magnitude[pixel]; } } } /** * Given low and high thresholds, this function suppresses or keep pixels based on whether * they are greater the low_threshold or not. It standardizes all strong edges here. 
* @requires: image pixels have the dimension described by height and width. **/ __global__ void thresholdAnalysis(unsigned char *suppressed_pixels, unsigned char *output_pixels, int high_threshold, int low_threshold, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel. if (col < width && row < height) { int pixel = row*width + col; // suppress less than the low threshold. Standardize to strong edge if // greater than high threshold. if (suppressed_pixels[pixel] >= high_threshold) output_pixels[pixel] = 255; else if (suppressed_pixels[pixel] < high_threshold && suppressed_pixels[pixel] >= low_threshold) output_pixels[pixel] = low_threshold; else output_pixels[pixel] = 0; } } /** * Does hysteresis analysis to find relationship between weak edges and strong edges. * the output of this step is the final output of the edge detection algorithm. * @requires: dimension of image matrices are equal to height * width. **/ __global__ void hystEdgeTracking(unsigned char *threshold_pixels, unsigned char *output_pixels, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // checks if its a valid pixel. if (col < width && row < height) { // hysteresis edge tracking: we look at the neighbors of the pixel (row, col) and // if there is a strong neighbor, the pixel becomes strong. int pixel = row*width + col; // check if it is a weak edge or not. if (threshold_pixels[pixel] > 0 && threshold_pixels[pixel] < 255) { int found = 0; // check the neighbors of the weak edge to find if there is // a strong edge around. for (int i = -1; i <= 1; i++) { for (int j = -1; j <= 1; j++) { int nrow = row + i; int ncol = col + j; // make sure the neighbor exists. if (nrow >= 0 && ncol >= 0 && nrow < height && ncol < width) if (threshold_pixels[nrow*width + ncol] == 255) { found = 1; i = j = 3; } // declare the weak edge strong if it has a strong neighbor. if (found) output_pixels[pixel] = 255; else output_pixels[pixel] = 0; } } } else { output_pixels[pixel] = threshold_pixels[pixel]; } } } ///////////////////////////////////////////////////////////////////////////////////////// /** * A regular implementation for finding the gradient and angle using sobel operators. * Here instead of doing it one by one as above, we do everything together so that we * the optimized implementation seen in the sequential, constant memory, and shared memory * implementations. * @requires: the same specification as the other implementation above. ***/ __global__ void gradient_calculation(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int *xGradient, int *yGradient, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel if (col < width && row < height) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { // saves the row index here int nrow = row + i; for (int j = -1; j <= 1; j++) { int ncol = col + j; if (ncol >= 0 && nrow >= 0 && ncol < width && nrow < height) { xGrad = xGrad + xGradient[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; yGrad = yGrad + yGradient[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; } } } // saves the magnitude gradient value. 
magMatrix[row*width + col] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row*width + col] = angle_degrees; } } /** * A generic device non-maximum suppression algorithm. It is used for all three different * implementations. No need for constant memory or shared memory. It is an alternative to * the regular device implementation(nonMaxSuppression) which depends on collapsing angles * to either 0, 45, 90, or 135 degrees. * @requires: the parameters conform to the specification in nonMaxSuppression. **/ __global__ void non_max_suppression(unsigned char *magMatrix, unsigned char *angleMatrix, unsigned char *suppressedMatrix, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure its a valid pixel if (col < width && row < height) { int back_pixel, front_pixel; int pixel = row*width + col; int angle = angleMatrix[pixel]; // chooses the direction of the angle and checks if // the pixel at (row, col) is a local maximum or not. // it is suppressed if it is not a local maximum, otherwise it is kept. if (angle > 0 && angle < 23) { // 0 degree angle back_pixel = (col-1 >= 0) ? magMatrix[pixel-1] : 0; front_pixel = (col+1 < width) ? magMatrix[pixel+1] : 0; } else if (angle >= 23 && angle < 68) { // 45 degree angle back_pixel = ((row+1) < height && (col-1) >= 0) ? magMatrix[(row+1)*width + (col-1)] : 0; front_pixel = ((row-1) >= 0 && (col+1) < width) ? magMatrix[(row-1)*width + (col+1)] : 0; } else if (angle >= 68 && angle < 113) { // 90 degree angle back_pixel = (row - 1 >= 0) ? magMatrix[(row-1)*width + col] : 0; front_pixel = (row + 1 < height) ? magMatrix[(row+1)*width + col] : 0; } else if (angle >= 113 && angle < 158) { // 135 degree angle back_pixel = (row-1 >= 0 && col-1 >= 0) ? magMatrix[(row-1)*width + (col-1)] : 0; front_pixel = ((row+1) < height && (col+1) < width) ? magMatrix[(row+1)*width + (col+1)] : 0; } else { // everything else is around 180 degrees. back_pixel = (col-1 >= 0) ? magMatrix[pixel-1] : 0; front_pixel = (col+1 < width) ? magMatrix[pixel+1] : 0; } // suppressing the pixel if it is not the global maximum // in the line described by its angle. if (magMatrix[pixel] < back_pixel || magMatrix[pixel] < front_pixel) { suppressedMatrix[pixel] = 0; } else { suppressedMatrix[pixel] = magMatrix[pixel]; } } } /***************************************************************************** ********************* WITH CONSTANT MEMORY AND CACHING ********************** *****************************************************************************/ /** * Constant memory for sobel operators mask and gaussian kernel mask. **/ __constant__ int xGradientMask[SOBEL_MASK_SIZE]; __constant__ int yGradientMask[SOBEL_MASK_SIZE]; __constant__ float GaussianMask[GAUSSIAN_MASK_SIZE]; /** * A guassian blur implementation that uses constant memory (GaussianMask) declared above. * It requires that the kernel mask generated for the convolution is copied to the constant * memory. * Everything else happens like the regular device gaussian implementation above. **/ __global__ void const_mem_gaussian_blur(unsigned char *inputMatrix, unsigned char *blurMatrix, int k, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int blurPixel = 0; int kernelLen = 2*k + 1; // make sure it is a valid pixel. 
if (col < width && row < height) { // do convolution by iterating over all the neighbors // of the pixel (row, col). for (int i = -1*k; i <= k; i++) { int nrow = row + i; int offset = (i+k) * kernelLen; for (int j = -1*k; j <= k; j++) { int ncol = col + j; // make sure the neighbor exists. if (ncol < width && ncol >= 0 && nrow < height && nrow >= 0) { blurPixel = blurPixel + GaussianMask[offset + (j+k)] * inputMatrix[nrow*width + ncol]; } } } // write the pixel output. blurMatrix[row*width + col] = blurPixel; } } /** * This function finds the gradient magnitude and gradient angle of the blurMatrix * by convolving sobelMasks (in the y and x directions) with the blurMatrix. * The gradients in the x and y directions are computed in place to avoid using extra space * and computation time. **/ __global__ void const_mem_sobel_filter(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int height, int width) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; // make sure it's a valid pixel if (col < width && row < height) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = -1; i <= 1; i++) { // saves the row index here int nrow = row + i; for (int j = -1; j <= 1; j++) { int ncol = col + j; if (ncol >= 0 && nrow >= 0 && ncol < width && nrow < height) { xGrad = xGrad + xGradientMask[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; yGrad = yGrad + yGradientMask[(i+1)*3 + (j+1)] * blurMatrix[nrow*width + ncol]; } } } // saves the magnitude gradient value. magMatrix[row*width + col] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row*width + col] = angle_degrees; } } /**************************************************************************** ********************** TILING AND SHARED MEMORY **************************** ****************************************************************************/ /** * Defines the relationship between tileWidth and blockWidth for * the use of shared memory and pulling data from global memory: * blockWidth = tileWidth + kernelLength - 1 * (a small standalone worked example of this sizing appears after the end of this file.) **/ #define SHARED_MEM_TILE_WIDTH 4 #define SHARED_MEM_FOR_SOBEL (SHARED_MEM_TILE_WIDTH + 2) #define GAUSSIAN_LEN 5 #define SOBEL_LEN 3 #define G_LEN (SHARED_MEM_TILE_WIDTH + GAUSSIAN_LEN - 1) #define S_LEN (SHARED_MEM_FOR_SOBEL + SOBEL_LEN - 1) // for regular blocks-- without using tiling. #define REG_BLOCK_LEN 32 // binds the macro values to named compile-time constants; the raw #defines // could not be used directly as the shared-memory array dimensions below. __constant__ const int gausLen = G_LEN; __constant__ const int sobelLen = S_LEN; /** * This is an extension over the const_mem_gaussian_blur algorithm described above. * The only addition is the use of shared memory and tiling to stage * global data in a per-block shared tile. * @requires: the Gaussian mask has been copied into the GaussianMask constant memory. * @requires: the relationship between tileWidth and blockWidth is preserved here. * @requires: dimension of matrices should match the height and width argument.
**/ __global__ void const_shared_mem_gaussian_blur(unsigned char *inputMatrix, unsigned char *blurMatrix, int k, int tileWidth, int height, int width) { int tx = threadIdx.x; int ty = threadIdx.y; int col_o = blockIdx.x * tileWidth + tx; // index to the output int row_o = blockIdx.y * tileWidth + ty; // index to the output. int kernelLen = 2*k + 1; int col_i = col_o - (kernelLen/2); // where to draw the data for shared memory from. int row_i = row_o - (kernelLen/2); // where to draw the data for shared memory from. int blurPixel = 0; // shared memory per block. __shared__ unsigned char shared_tile[gausLen][gausLen]; // retrieving data from the global memory to the shared tile memory. // makes sure a thread is working on a valid. if ((row_i >= 0) && (col_i >= 0) && (row_i < height) && (col_i < width)) { shared_tile[ty][tx] = inputMatrix[row_i*width + col_i]; } else { // put 0 in the place of invalid pixels. shared_tile[ty][tx] = 0; } __syncthreads(); // make sure the thread is supposed to be doing computations. if (ty < tileWidth && tx < tileWidth) { // convolution happens here by iterating over the neighbors of // the pixel. for (int i = 0; i < kernelLen; i++) for (int j = 0; j < kernelLen; j++) blurPixel = blurPixel + GaussianMask[i*kernelLen + j]*shared_tile[i+ty][j+tx]; // make sure the output indices are valid. if (row_o < height && col_o < width) { blurMatrix[row_o*width + col_o] = blurPixel; } } } /** * This is also an extension of the constant memory sobel filter device kernel above. * In addition to using constant memory, it also uses shared memory to compute the gradient * magnitude and direction from the blurMatrix. * @requires: GaussianMask, and xGradientMask and yGradientMask have the appropriate data copied in. * @requires: the relationship between tileWidth and blockWidth is reserved. * @requires: dimensions match the arguments given. **/ __global__ void const_shared_mem_sobel_filter(unsigned char *blurMatrix, unsigned char *magMatrix, unsigned char *angleMatrix, int k, int tileWidth, int height, int width) { int tx = threadIdx.x; int ty = threadIdx.y; // output indices int col_o = blockIdx.x * tileWidth + tx; int row_o = blockIdx.y * tileWidth + ty; // input indices. int col_i = col_o - 1; int row_i = row_o - 1; // shared memory for the block __shared__ unsigned char shared_tile[sobelLen][sobelLen]; // retrieving data from the global memory to the shared tile memory. // makes sure a thread is working on a valid. if ((row_i >= 0) && (col_i >= 0) && (row_i < height) && (col_i < width)) { shared_tile[ty][tx] = blurMatrix[row_i*width + col_i]; } else { // put 0 in the place of invalid pixels. shared_tile[ty][tx] = 0; } __syncthreads(); // make sure indices are within tileWith if (ty < tileWidth && tx < tileWidth) { int xGrad = 0; int yGrad = 0; // convolve the sobel operator kernel with // the pixels next to the pixel (row,col) // and saves the new result to (row,col) for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { xGrad = xGrad + xGradientMask[i*3 + j] * shared_tile[i+ty][j+tx]; yGrad = yGrad + yGradientMask[i*3 + j] * shared_tile[i+ty][j+tx]; } } if (row_o < height && col_o < width) { // saves the magnitude gradient value. magMatrix[row_o*width + col_o] = hypot((float) xGrad, (float) yGrad); // finds the pixel angle in degrees. 
float angle_radians = atan2((float) yGrad, (float) xGrad); int angle_degrees = abs(angle_radians) * (180.0 / M_PI); angleMatrix[row_o*width + col_o] = angle_degrees; } } } /****************************************************************************** ********************** DEVICE CODE ENDS HERE ********************************* ******************************************************************************/ /** * Computes a Gaussian convolutional kernel for a given size. A gaussian * kernel is given by: * => H_{i,j} = (1/2*pi*std_dev^2)*exp(-[(i-(k+1))^2 + (j-(k+1))^2] / (2*std_dev^2)) * * The result of the operation is written to the integer buffer given. * @param kernel is a 1-dimensional array to contain kernel values. Indexing into * the array is given by "row*kernel_length + col". * @param kernel_length is the height/width of the kernel. For every kernel (height = width). * @param std_dev is the standard deviation of values to consider when averaging * neighboring pixels. * * The idea for normalizing: * https://stackoverflow.com/questions/8204645/implementing-gaussian-blur-how-to-calculate-convolution-matrix-kernel */ void set_convo_kernel(float *kernel, int k, float sigma) { if (k < 1) { printf("For Gaussian kernel, k is undefined: %d\n", k); exit(1); } if (sigma <= 0) { printf("Standard Deviation < 0: %f\n", sigma); exit(1); } //initializes constants of the Gaussian kernel. int kernLen = 2*k + 1; float sigmaSqCons = 2 * sigma * sigma; float sigmaPiCons = 1.0 / (sigmaSqCons * M_PI); int k_inc = k + 1; // iterates and fills rows and columns of the kernel float sum = 0.0; for (int i = 1; i <= kernLen; i++) { int row_offset = (i - 1) * kernLen; for (int j = 1; j <= kernLen; j++) { int index = row_offset + (j - 1); float i_pow = pow(i - k_inc, 2.0); float j_pow = pow(j - k_inc, 2.0); float val = sigmaPiCons * exp(-1 * (i_pow + j_pow) / sigmaSqCons); sum = sum + val; kernel[index] = val; } } // Normalize the kernel for (int x = 0; x < kernLen; ++x) for (int y = 0; y < kernLen; ++y) kernel[x*kernLen + y] /= sum; } /** * Finds the strongest pixel intensity value in an image and sets the upper threshold as * a 0.7 * highest_pixel_intensity. * @requires: pixels_ptr->num_components = 1. * @param pixels_ptr is an array of pixels for an image. **/ int maxPixelIntensity(frame_ptr imgPixels) { int max = 0; for (int i = 0; i < imgPixels->image_height; i++) { for (int j = 0; j < imgPixels->image_width; j++) { if (imgPixels->row_pointers[i][j] > max) max = imgPixels->row_pointers[i][j]; } } return max; } /** * allocates space on the device memory and writes the address to the * memory location to the d_pointer. * @param: d_pointer is the location of the pointer to the memory allocated * on the device * @param: numBytes is the number of bytes to allocate on the device. **/ void setDevMemory(void **d_pointer, int numBytes) { if (cudaMalloc(d_pointer, numBytes) != cudaSuccess) { printf("### CANT SET DEVICE MEMORY: %p\n", d_pointer); exit(1); } } /** * Copies bytes from one memory location to another. The memory locations * can either be on the device, host, or both. This is just a wrapper function * for the cuda implementation, cudaMemcpy(). * @param: dst where the bytes should be copied to. * @param: src the location that contains the bytes. * @param: numBytes is how many bytes there is to copy. * @param: dir indicates whether to copy from device to device, device to host, * host to device, or host to host. 
**/ void cpyMemory(void *dst, void *src, int numBytes, cudaMemcpyKind dir) { if (cudaMemcpy(dst, src, numBytes, dir) != cudaSuccess) { printf("### MEM CPY FAIL : %p -> %p\n", src, dst); exit(1); } } /** * Checks if the last device kernel was successfully executed or not. **/ void checkErrorForLastKernel() { if (cudaGetLastError() != cudaSuccess) { printf("### Kernel Execution failed ###\n"); exit(1); } } /** * This function allocates the necessary resources for executing a kernel * that uses constant memory and shared memory (using tiling) to do convolution * over the image as it processes it for edge detection. * -- xGradient, yGradient are declared as global constant memory * -- gaussianMask is also declared as global constant memory * Any thread working on the convolution reads mask data from the global constant memory, * participates in tiling and pulling data from global memory, and writes to * global memory. * @param result is the output frame_ptr to write into. ***/ void run_const_shared_mem_kernel(frame_ptr result) { // setting a gaussian convolution kernel as a 5x5. int k = 2; int kernel_len = 2*k + 1; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); /** * timing kernel execution */ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // copies the local gaussian mask (float weights) into the constant memory declared // globally. if (cudaMemcpyToSymbol(GaussianMask, kernel, GAUSSIAN_MASK_SIZE * sizeof(float)) != cudaSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } if (cudaMemcpyToSymbol(xGradientMask, xGradient, 9 * sizeof(int)) != cudaSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } if (cudaMemcpyToSymbol(yGradientMask, yGradient, 9 * sizeof(int)) != cudaSuccess) { printf("Couldn't write to the global constant memory\n"); exit(1); } // image matrix information and the device pointers to store // input matrix and output matrix. unsigned char *d_from, *d_to, *d_magMatrix; int height = input_frames[0]->image_height; int width = input_frames[0]->image_width; int size = height * width; // allocate the space for input and output on the device // and copy the host input to the device input. setDevMemory((void**) &d_from, size); setDevMemory((void**) &d_to, size); setDevMemory((void**) &d_magMatrix, size); cpyMemory(d_from, input_frames[0]->image_buffer, size, cudaMemcpyHostToDevice); // setting up tileWidth information for block and grid dimensions // of the kernel to execute. int tileWidth = SHARED_MEM_TILE_WIDTH; int tileWidthSobel = SHARED_MEM_FOR_SOBEL; // when doing gaussian filter, a 5x5 int blockWidth = tileWidth + kernel_len - 1; dim3 dimBlock(blockWidth, blockWidth, 1); // when doing sobel filters, a 3x3 kernel int blockWidth_Sobel = tileWidthSobel + 2; dim3 sobelBlock(blockWidth_Sobel, blockWidth_Sobel, 1); // a grid for shared memory, and a regular grid for anything else. dim3 dimGrid((width - 1)/tileWidth + 1, (height - 1)/tileWidth + 1, 1); #ifdef RUN_CONST_SHARED_MEM_KERNEL // the way to do regular gridding when using shared memory. dim3 regGrid((width - 1)/blockWidth + 1, (height - 1)/blockWidth + 1, 1); printf("\t......Running Const_Shared_Mem_Kernel.....\n"); // launching a kernel to perform a gaussian blur on the image input.
const_shared_mem_gaussian_blur<<<dimGrid, dimBlock>>> (d_from, d_to, k, tileWidth, height, width); checkErrorForLastKernel(); // launching a kernel that performs sobel gradient analysis and // writes the result of gradient magnitude and pixel angle into // the matrices d_magMatrix and d_from, respectively const_shared_mem_sobel_filter<<<dimGrid, sobelBlock /*dimBlock*/>>> (d_to, d_magMatrix, d_from, k, tileWidth, height, width); checkErrorForLastKernel(); #else blockWidth = REG_BLOCK_LEN; dim3 regGrid((width - 1)/blockWidth + 1, (height - 1)/blockWidth + 1, 1); dimBlock.x = blockWidth; dimBlock.y = blockWidth; printf("\t.....Running Const_Mem_Kernel.....\n"); // launching a kernel to perform a gaussian blur with constant // memory but without shared memory. const_mem_gaussian_blur<<<regGrid, dimBlock>>> (d_from, d_to, k, height, width); checkErrorForLastKernel(); // launching a kernel that performs sobel gradient analysis with constant // memory but without using shared memory. const_mem_sobel_filter<<<regGrid, dimBlock>>> (d_to, d_magMatrix, d_from, height, width); #endif // calls the non maximum suppression algorithm for a regular non constant // non-shared memory implementation non_max_suppression<<<regGrid, dimBlock>>> (d_magMatrix, d_from, d_to, height, width); checkErrorForLastKernel(); thresholdAnalysis<<<regGrid, dimBlock>>> (d_to, d_from, high_threshold, low_threshold, height, width); checkErrorForLastKernel(); // final step. calls the regular hysteresis analysis. hystEdgeTracking<<<regGrid, dimBlock>>> (d_from, d_to, height, width); checkErrorForLastKernel(); cpyMemory(result->image_buffer, d_to, size, cudaMemcpyDeviceToHost); // synchronizing the start and stop times to get the // elapsed time. cudaEventRecord(stop); cudaEventSynchronize(stop); float time_inMilli = 0; cudaEventElapsedTime(&time_inMilli, start, stop); // prints the elapsed time. printf("Kernel Elapsed Time in ms: %.8f\n", time_inMilli); cudaFree(d_from); cudaFree(d_magMatrix); cudaFree(d_to); } // This sets up GPU device by allocating the required memory and then // calls the kernel on GPU. (You might choose to add/remove arguments.) // It's currently set up to use the global variables and write its // final results into the specified argument. void runKernel(frame_ptr result) { // testing set_convo_kernel int k = 2; int kernel_len = 2*k + 1; float kernel[GAUSSIAN_MASK_SIZE]; float sigma = SIGMA; set_convo_kernel(kernel, k, sigma); float total = 0.0; for (int i = 0; i < kernel_len; i++) { for (int j = 0; j < kernel_len; j++) { total = total + kernel[i*kernel_len + j]; printf("%5d", (int) round(159 * kernel[i*kernel_len + j])); } printf("\n"); } printf("Gaussian Total: %.5f\n", total); ///////////////////////////////////////////////////////////// unsigned char *d_from, *d_to, *d_final_to, *d_magnitude, *d_pixel_angle, *d_final_suppression; int height = input_frames[0]->image_height; int width = input_frames[0]->image_width; int size = height * width; printf("\t......Running Regular Kernel......\n"); // cudaEvents to record the elapse time for kernel execution. cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // allocates to and from frame_ptrs and copies the global // frame_ptrs into the device memory. Exits when cudaMalloc/cudaMemcpy fails. 
setDevMemory((void**) &d_from, size); setDevMemory((void**) &d_to, size); setDevMemory((void**) &d_final_to, size); setDevMemory((void**) &d_magnitude, size); setDevMemory((void**) &d_pixel_angle, size); setDevMemory((void**) &d_final_suppression, size); cpyMemory(d_from, input_frames[0]->image_buffer, size, cudaMemcpyHostToDevice); /// allocates space for the kernel weights and copies the /// kernel computed to the device memory. float *d_kernel; int k_numBytes = (kernel_len * kernel_len * sizeof(d_kernel[0])); setDevMemory((void**) &d_kernel, k_numBytes); cpyMemory(d_kernel, kernel, k_numBytes, cudaMemcpyHostToDevice); //////////////////////////////////////////////////////////////// // sets the block and grid dimensions. int block_side = REG_BLOCK_LEN; dim3 dimBlock(block_side, block_side, 1); dim3 dimGrid(ceil(width/ (float) block_side), ceil(height/ (float) block_side), 1); // kernel call to blur an image. APPLY_GAUSSIAN_BLUR<<<dimGrid, dimBlock>>>(d_kernel, k, d_from, d_to, height, width); checkErrorForLastKernel(); #ifdef RUNNING_INDIVIDUAL_STEPS // copies the result of Gaussian filter into the // from pointer to start the gradient kernel int g_numBytes = 9 * sizeof(xGradient[0]); int *d_sobelKernel; /// allocates space for a sobel kernel and copies one of the gradient kernels. setDevMemory((void**) &d_sobelKernel, g_numBytes); cpyMemory(d_sobelKernel, xGradient, g_numBytes, cudaMemcpyHostToDevice); /// copies the result of the first kernel as input to the second kernel. cpyMemory(d_from, d_to, size, cudaMemcpyDeviceToDevice); // Kernel call to apply Sobel Operator applySobelOperatorKernel <<<dimGrid, dimBlock>>>(d_sobelKernel, d_from, d_to, height, width); checkErrorForLastKernel(); cpyMemory(d_sobelKernel, yGradient, 9 * sizeof(yGradient[0]), cudaMemcpyHostToDevice); // Kernel call to apply Sobel Operator applySobelOperatorKernel <<<dimGrid, dimBlock>>>(d_sobelKernel, d_from, d_final_to, height, width); checkErrorForLastKernel(); // immediately frees gradient space allocated. cudaFree(d_sobelKernel); // gradient magnitude and angle analysis pixelMagnitudeAndAngle <<<dimGrid, dimBlock>>>(d_to, d_final_to, d_magnitude, d_pixel_angle, height, width); checkErrorForLastKernel(); // non-maximum suppression analysis. nonMaximumSuppression <<<dimGrid, dimBlock>>>(d_magnitude, d_pixel_angle, d_final_suppression, height, width); checkErrorForLastKernel(); #else // copies the result of Gaussian filter into the // from pointer to start the gradient kernel int g_numBytes = 9 * sizeof(xGradient[0]); int *d_xGradient, *d_yGradient; /// allocates space for the two sobel kernel and copies one of the gradient kernels. setDevMemory((void**) &d_xGradient, g_numBytes); setDevMemory((void**) &d_yGradient, g_numBytes); cpyMemory(d_xGradient, xGradient, g_numBytes, cudaMemcpyHostToDevice); cpyMemory(d_yGradient, yGradient, g_numBytes, cudaMemcpyHostToDevice); // calculates gradient and angle information in one phase. gradient_calculation<<<dimGrid, dimBlock>>> (d_to, d_magnitude, d_pixel_angle, d_xGradient, d_yGradient, height, width); checkErrorForLastKernel(); // non maximum suppression using the angle and magnitude found above. non_max_suppression<<<dimGrid, dimBlock>>> (d_magnitude, d_pixel_angle, d_final_suppression, height, width); checkErrorForLastKernel(); cudaFree(d_xGradient); cudaFree(d_yGradient); #endif /// double threshold analysis. 
thresholdAnalysis <<<dimGrid, dimBlock>>>(d_final_suppression, d_magnitude, high_threshold, low_threshold, height, width); checkErrorForLastKernel(); // hysteresis analysis - edge tracking to find relationship // between weak edges and strong edges. hystEdgeTracking <<<dimGrid, dimBlock>>>(d_magnitude, d_to, height, width); checkErrorForLastKernel(); cudaEventRecord(stop); // copies the results from the device memory into the // host output frame_ptr cpyMemory(result->image_buffer, d_to, size, cudaMemcpyDeviceToHost); // synchronizing the start and stop times to get the // elapsed time. cudaEventSynchronize(stop); float time_inMilli = 0; cudaEventElapsedTime(&time_inMilli, start, stop); // prints the elapsed time in ms. printf("Kernel Elapsed Time in ms: %.8f\n", time_inMilli); // frees device resources cudaFree(d_from); cudaFree(d_to); cudaFree(d_kernel); cudaFree(d_final_to); cudaFree(d_pixel_angle); cudaFree(d_magnitude); cudaFree(d_final_suppression); } /** * Host main routine */ int main(int argc, char **argv) { if(argc < 3){ fprintf(stderr, "ERROR: Need to specify input file and then output file\n"); exit(1); } input_frames[0] = read_JPEG_file(argv[1]); // Load input file prepareKernelCall(); // Do the actual work including calling CUDA kernel write_JPEG_file(argv[2], output_frames[0], 75); // Write output file //runKernel(NULL); return 0; }
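/* Standalone worked example of the shared-memory launch geometry used above
   (referenced from the TILING AND SHARED MEMORY section): an output tile of
   tileWidth x tileWidth pixels is produced by a block of (tileWidth + 2k)^2
   threads so the mask halo can be staged into shared memory with one load per
   thread. This is an illustration only; the 1920x1080 image size and the
   standalone main() are assumptions, not values taken from the pipeline. */
#include <cstdio>
#include <cassert>

int main() {
    const int width = 1920, height = 1080;  // assumed image size, for illustration only
    const int k = 2;                        // 5x5 Gaussian mask, as in the pipeline above
    const int kernelLen = 2 * k + 1;
    const int tileWidth = 4;                // SHARED_MEM_TILE_WIDTH
    const int blockWidth = tileWidth + kernelLen - 1;   // 4 + 5 - 1 = 8 threads per side

    // The grid is sized by output tiles, not by thread blocks: each block of
    // blockWidth x blockWidth threads writes only tileWidth x tileWidth pixels.
    const int gridX = (width  - 1) / tileWidth + 1;
    const int gridY = (height - 1) / tileWidth + 1;

    // Every thread loads exactly one element of the tile-plus-halo region.
    assert(blockWidth * blockWidth == (tileWidth + 2 * k) * (tileWidth + 2 * k));

    printf("block = %dx%d threads, grid = %dx%d blocks, %d output pixels per block\n",
           blockWidth, blockWidth, gridX, gridY, tileWidth * tileWidth);
    return 0;
}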
d4af8f0e8965c6770a5f36b8f926b31e61d0c91c.hip
// !!! This is a file automatically generated by hipify!!! /* * main.c * Fusion: Abstractions for Multicore/Manycore Heterogenous Parallel Programming Using GPUs Anderson Boettge Pinheiro, Francisco Heron de Carvalho Junior, Neemias Gabriel Pena Batista Arruda, Tiago Carneiro */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <omp.h> #define mat(i,j) mat_h[i*N+j] #define mat_h(i,j) mat_h[i*N+j] #define mat_d(i,j) mat_d[i*N_l+j] #define mat_block(i,j) mat_block[i*N_l+j] #define proximo(x) x+1 #define anterior(x) x-1 #define MAX 8192 #define INFINITO 999999 #define ZERO 0 #define ONE 1 #define _VAZIO_ -1 #define _VISITADO_ 1 #define _NAO_VISITADO_ 0 int qtd = 0; int custo = 0; int N; int melhor = INFINITO; int upper_bound; int mat_h[MAX]; #define CUDA_CHECK_RETURN(value) { \ hipError_t _m_cudaStat = value; \ if (_m_cudaStat != hipSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} void read() { int i; scanf("%d", &N); for (i = 0; i < (N * N); i++) { scanf("%d", &mat_h[i]); } } unsigned long long int calculaNPrefixos(const int nivelPrefixo, const int nVertice) { unsigned long long int x = nVertice - 1; int i; for (i = 1; i < nivelPrefixo-1; ++i) { x *= nVertice - i-1; } return x; } void fillFixedPaths(short* preFixo, int nivelPrefixo) { char flag[50]; int vertice[50]; int cont = 0; int i, nivel; for (i = 0; i < N; ++i) { flag[i] = 0; vertice[i] = -1; } vertice[0] = 0; flag[0] = 1; nivel = 1; while (nivel >= 1){ if (vertice[nivel] != -1) { flag[vertice[nivel]] = 0; } do { vertice[nivel]++; } while (vertice[nivel] < N && flag[vertice[nivel]]); if (vertice[nivel] < N) { flag[vertice[nivel]] = 1; nivel++; if (nivel == nivelPrefixo) { for (i = 0; i < nivelPrefixo; ++i) { preFixo[cont * nivelPrefixo + i] = vertice[i]; } cont++; nivel--; } } else { vertice[nivel] = -1; nivel--; } } } __global__ void dfs_cuda_UB_stream(int N,int stream_size, int *mat_d, short *preFixos_d, int nivelPrefixo, int upper_bound, int *sols_d, int *melhorSol_d) { register int idx = blockIdx.x * blockDim.x + threadIdx.x; register int flag[16]; register int vertice[16]; register int N_l = N; register int i, nivel; register int custo; register int qtd_solucoes_thread = 0; register int UB_local = upper_bound; register int nivelGlobal = nivelPrefixo; int stream_size_l = stream_size; if (idx < stream_size_l) { for (i = 0; i < N_l; ++i) { vertice[i] = _VAZIO_; flag[i] = _NAO_VISITADO_; } vertice[0] = 0; flag[0] = _VISITADO_; custo= ZERO; for (i = 1; i < nivelGlobal; ++i) { vertice[i] = preFixos_d[idx * nivelGlobal + i]; flag[vertice[i]] = _VISITADO_; custo += mat_d(vertice[i-1],vertice[i]); } nivel=nivelGlobal; while (nivel >= nivelGlobal ) { if (vertice[nivel] != _VAZIO_) { flag[vertice[nivel]] = _NAO_VISITADO_; custo -= mat_d(vertice[anterior(nivel)],vertice[nivel]); } do { vertice[nivel]++; } while (vertice[nivel] < N_l && flag[vertice[nivel]]); if (vertice[nivel] < N_l) { custo += mat_d(vertice[anterior(nivel)],vertice[nivel]); flag[vertice[nivel]] = _VISITADO_; nivel++; if (nivel == N_l) { ++qtd_solucoes_thread; if (custo + mat_d(vertice[anterior(nivel)],0) < 
UB_local) { UB_local = custo + mat_d(vertice[anterior(nivel)],0); } nivel--; } } else { vertice[nivel] = _VAZIO_; nivel--; } } sols_d[idx] = qtd_solucoes_thread; melhorSol_d[idx] = UB_local; } } void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } int callCompleteEnumStreams(const int nivelPreFixos){ int *mat_d; int otimo_global = INFINITO; int *qtd_threads_streams; int qtd_sols_global = 0; int nPreFixos = calculaNPrefixos(nivelPreFixos,N); int block_size =192; int *sols_h, *sols_d; int *melhorSol_h, *melhorSol_d; short * path_h = (short*) malloc(sizeof(short) * nPreFixos * nivelPreFixos); short * path_d; /* Variaveis para os streams*/ const int chunk = 192*10; const int numStreams = nPreFixos / chunk + (nPreFixos % chunk == 0 ? 0 : 1); const int num_blocks = chunk/block_size + (chunk % block_size == 0 ? 0 : 1); int resto = 0; resto = (nPreFixos % chunk); qtd_threads_streams = (int*)malloc(sizeof(int)*numStreams); /* * Setando qtd de threads do stream * */ if(numStreams>1){ for(int i = 0; i<numStreams-1 / block_size;++i){ qtd_threads_streams[i] = chunk; } if(resto>0){ qtd_threads_streams[numStreams-1] = resto; } } else qtd_threads_streams[0] = resto; CUDA_CHECK_RETURN( hipMalloc((void **) &path_d, nPreFixos*nivelPreFixos*sizeof(short))); sols_h = (int*)malloc(sizeof(int)*nPreFixos); melhorSol_h = (int*)malloc(sizeof(int)*nPreFixos); CUDA_CHECK_RETURN( hipMalloc((void **) &mat_d, N * N * sizeof(int))); fillFixedPaths(path_h, nivelPreFixos); CUDA_CHECK_RETURN( hipMemcpy(mat_d, mat_h, N * N * sizeof(int), hipMemcpyHostToDevice)); for(int i = 0; i<nPreFixos; ++i) melhorSol_h[i] = INFINITO; CUDA_CHECK_RETURN( hipMalloc((void **) &melhorSol_d, sizeof(int)*nPreFixos)); CUDA_CHECK_RETURN( hipMalloc((void **) &sols_d, sizeof(int)*nPreFixos)); hipStream_t vectorOfStreams[numStreams]; for(int stream_id=0; stream_id<numStreams; stream_id++) hipStreamCreate(&vectorOfStreams[stream_id]); for(int stream_id=0; stream_id<numStreams; stream_id++) hipMemcpyAsync(&path_d[stream_id*chunk*nivelPreFixos], &path_h[stream_id*chunk*nivelPreFixos], qtd_threads_streams[stream_id]*sizeof(short)*nivelPreFixos, hipMemcpyHostToDevice,vectorOfStreams[stream_id]); for(int stream_id=0; stream_id<numStreams; stream_id++){ hipMemcpyAsync(&melhorSol_d[stream_id*chunk], &melhorSol_h[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), hipMemcpyHostToDevice, vectorOfStreams[stream_id]); } for(int stream_id=0; stream_id<numStreams; stream_id++){ hipMemcpyAsync(&sols_d[stream_id*chunk], &sols_h[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), hipMemcpyHostToDevice,vectorOfStreams[stream_id]); } for(int stream_id=0; stream_id<numStreams; stream_id++){ hipLaunchKernelGGL(( dfs_cuda_UB_stream), dim3(num_blocks),dim3(block_size),0,vectorOfStreams[stream_id], N,qtd_threads_streams[stream_id],mat_d, &path_d[stream_id*chunk*nivelPreFixos], nivelPreFixos,999999, &sols_d[stream_id*chunk],&melhorSol_d[stream_id*chunk]); } for(int stream_id=0; stream_id<numStreams; stream_id++) hipMemcpyAsync(&sols_h[stream_id*chunk],&sols_d[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), hipMemcpyDeviceToHost,vectorOfStreams[stream_id]); for(int stream_id=0;stream_id<numStreams; stream_id++) hipMemcpyAsync(&melhorSol_h[stream_id*chunk],&melhorSol_d[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), hipMemcpyDeviceToHost,vectorOfStreams[stream_id]); hipDeviceSynchronize(); 
for(int i = 0; i<nPreFixos; ++i){ qtd_sols_global+=sols_h[i]; if(melhorSol_h[i]<otimo_global) otimo_global = melhorSol_h[i]; } printf("\n\n\n\t niveis preenchidos: %d.\n",nivelPreFixos); printf("\t Numero de streams: %d.\n",numStreams); printf("\t Tamanho do stream: %d.\n",chunk); printf("\nQuantidade de solucoes encontradas: %d.", qtd_sols_global); printf("\n\tOtimo global: %d.\n\n", otimo_global); CUDA_CHECK_RETURN( hipFree(mat_d)); CUDA_CHECK_RETURN( hipFree(sols_d)); CUDA_CHECK_RETURN( hipFree(path_d)); CUDA_CHECK_RETURN( hipFree(melhorSol_d)); return otimo_global; } int main() { read(); int niveis = 5; printf("\n\nEnumeracao com streams:\n\n"); callCompleteEnumStreams(niveis); return 0; }
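/* A small standalone sketch of the sizing arithmetic used above: with the
   first city fixed at 0, a prefix of depth `level` can be completed in
   (N-1)*(N-2)*...*(N-level+1) ways, and that count is what gets split into
   chunks of 192*10 prefixes, one stream per chunk. N = 12 and level = 5 are
   illustrative assumptions, not values taken from any particular input. */
#include <cstdio>

int main() {
    const int N = 12, level = 5;          // assumed instance size / prefix depth
    unsigned long long nPrefixes = 1;
    for (int i = 1; i < level; ++i)       // (N-1)*(N-2)*...*(N-level+1)
        nPrefixes *= (unsigned long long)(N - i);

    const int chunk = 192 * 10;           // prefixes handled per stream
    const int numStreams = (int)(nPrefixes / chunk + (nPrefixes % chunk == 0 ? 0 : 1));
    const int blockSize = 192;
    const int numBlocks = chunk / blockSize + (chunk % blockSize == 0 ? 0 : 1);

    // For N = 12, level = 5: 11*10*9*8 = 7920 prefixes -> 5 streams of up to 1920 threads.
    printf("%llu prefixes -> %d stream(s) of up to %d threads (%d blocks of %d)\n",
           nPrefixes, numStreams, chunk, numBlocks, blockSize);
    return 0;
}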
d4af8f0e8965c6770a5f36b8f926b31e61d0c91c.cu
/* * main.c * Fusion: Abstractions for Multicore/Manycore Heterogenous Parallel Programming Using GPUs Anderson Boettge Pinheiro, Francisco Heron de Carvalho Junior, Neemias Gabriel Pena Batista Arruda, Tiago Carneiro */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <cuda.h> #include <omp.h> #define mat(i,j) mat_h[i*N+j] #define mat_h(i,j) mat_h[i*N+j] #define mat_d(i,j) mat_d[i*N_l+j] #define mat_block(i,j) mat_block[i*N_l+j] #define proximo(x) x+1 #define anterior(x) x-1 #define MAX 8192 #define INFINITO 999999 #define ZERO 0 #define ONE 1 #define _VAZIO_ -1 #define _VISITADO_ 1 #define _NAO_VISITADO_ 0 int qtd = 0; int custo = 0; int N; int melhor = INFINITO; int upper_bound; int mat_h[MAX]; #define CUDA_CHECK_RETURN(value) { \ cudaError_t _m_cudaStat = value; \ if (_m_cudaStat != cudaSuccess) { \ fprintf(stderr, "Error %s at line %d in file %s\n", \ cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \ exit(1); \ } } static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} void read() { int i; scanf("%d", &N); for (i = 0; i < (N * N); i++) { scanf("%d", &mat_h[i]); } } unsigned long long int calculaNPrefixos(const int nivelPrefixo, const int nVertice) { unsigned long long int x = nVertice - 1; int i; for (i = 1; i < nivelPrefixo-1; ++i) { x *= nVertice - i-1; } return x; } void fillFixedPaths(short* preFixo, int nivelPrefixo) { char flag[50]; int vertice[50]; int cont = 0; int i, nivel; for (i = 0; i < N; ++i) { flag[i] = 0; vertice[i] = -1; } vertice[0] = 0; flag[0] = 1; nivel = 1; while (nivel >= 1){ if (vertice[nivel] != -1) { flag[vertice[nivel]] = 0; } do { vertice[nivel]++; } while (vertice[nivel] < N && flag[vertice[nivel]]); if (vertice[nivel] < N) { flag[vertice[nivel]] = 1; nivel++; if (nivel == nivelPrefixo) { for (i = 0; i < nivelPrefixo; ++i) { preFixo[cont * nivelPrefixo + i] = vertice[i]; } cont++; nivel--; } } else { vertice[nivel] = -1; nivel--; } } } __global__ void dfs_cuda_UB_stream(int N,int stream_size, int *mat_d, short *preFixos_d, int nivelPrefixo, int upper_bound, int *sols_d, int *melhorSol_d) { register int idx = blockIdx.x * blockDim.x + threadIdx.x; register int flag[16]; register int vertice[16]; register int N_l = N; register int i, nivel; register int custo; register int qtd_solucoes_thread = 0; register int UB_local = upper_bound; register int nivelGlobal = nivelPrefixo; int stream_size_l = stream_size; if (idx < stream_size_l) { for (i = 0; i < N_l; ++i) { vertice[i] = _VAZIO_; flag[i] = _NAO_VISITADO_; } vertice[0] = 0; flag[0] = _VISITADO_; custo= ZERO; for (i = 1; i < nivelGlobal; ++i) { vertice[i] = preFixos_d[idx * nivelGlobal + i]; flag[vertice[i]] = _VISITADO_; custo += mat_d(vertice[i-1],vertice[i]); } nivel=nivelGlobal; while (nivel >= nivelGlobal ) { if (vertice[nivel] != _VAZIO_) { flag[vertice[nivel]] = _NAO_VISITADO_; custo -= mat_d(vertice[anterior(nivel)],vertice[nivel]); } do { vertice[nivel]++; } while (vertice[nivel] < N_l && flag[vertice[nivel]]); if (vertice[nivel] < N_l) { custo += mat_d(vertice[anterior(nivel)],vertice[nivel]); flag[vertice[nivel]] = _VISITADO_; nivel++; if (nivel == N_l) { ++qtd_solucoes_thread; if (custo + mat_d(vertice[anterior(nivel)],0) < UB_local) { UB_local = custo + mat_d(vertice[anterior(nivel)],0); 
} nivel--; } } else { vertice[nivel] = _VAZIO_; nivel--; } } sols_d[idx] = qtd_solucoes_thread; melhorSol_d[idx] = UB_local; } } void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } int callCompleteEnumStreams(const int nivelPreFixos){ int *mat_d; int otimo_global = INFINITO; int *qtd_threads_streams; int qtd_sols_global = 0; int nPreFixos = calculaNPrefixos(nivelPreFixos,N); int block_size =192; int *sols_h, *sols_d; int *melhorSol_h, *melhorSol_d; short * path_h = (short*) malloc(sizeof(short) * nPreFixos * nivelPreFixos); short * path_d; /* Variaveis para os streams*/ const int chunk = 192*10; const int numStreams = nPreFixos / chunk + (nPreFixos % chunk == 0 ? 0 : 1); const int num_blocks = chunk/block_size + (chunk % block_size == 0 ? 0 : 1); int resto = 0; resto = (nPreFixos % chunk); qtd_threads_streams = (int*)malloc(sizeof(int)*numStreams); /* * Setando qtd de threads do stream * */ if(numStreams>1){ for(int i = 0; i<numStreams-1 / block_size;++i){ qtd_threads_streams[i] = chunk; } if(resto>0){ qtd_threads_streams[numStreams-1] = resto; } } else qtd_threads_streams[0] = resto; CUDA_CHECK_RETURN( cudaMalloc((void **) &path_d, nPreFixos*nivelPreFixos*sizeof(short))); sols_h = (int*)malloc(sizeof(int)*nPreFixos); melhorSol_h = (int*)malloc(sizeof(int)*nPreFixos); CUDA_CHECK_RETURN( cudaMalloc((void **) &mat_d, N * N * sizeof(int))); fillFixedPaths(path_h, nivelPreFixos); CUDA_CHECK_RETURN( cudaMemcpy(mat_d, mat_h, N * N * sizeof(int), cudaMemcpyHostToDevice)); for(int i = 0; i<nPreFixos; ++i) melhorSol_h[i] = INFINITO; CUDA_CHECK_RETURN( cudaMalloc((void **) &melhorSol_d, sizeof(int)*nPreFixos)); CUDA_CHECK_RETURN( cudaMalloc((void **) &sols_d, sizeof(int)*nPreFixos)); cudaStream_t vectorOfStreams[numStreams]; for(int stream_id=0; stream_id<numStreams; stream_id++) cudaStreamCreate(&vectorOfStreams[stream_id]); for(int stream_id=0; stream_id<numStreams; stream_id++) cudaMemcpyAsync(&path_d[stream_id*chunk*nivelPreFixos], &path_h[stream_id*chunk*nivelPreFixos], qtd_threads_streams[stream_id]*sizeof(short)*nivelPreFixos, cudaMemcpyHostToDevice,vectorOfStreams[stream_id]); for(int stream_id=0; stream_id<numStreams; stream_id++){ cudaMemcpyAsync(&melhorSol_d[stream_id*chunk], &melhorSol_h[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), cudaMemcpyHostToDevice, vectorOfStreams[stream_id]); } for(int stream_id=0; stream_id<numStreams; stream_id++){ cudaMemcpyAsync(&sols_d[stream_id*chunk], &sols_h[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), cudaMemcpyHostToDevice,vectorOfStreams[stream_id]); } for(int stream_id=0; stream_id<numStreams; stream_id++){ dfs_cuda_UB_stream<<<num_blocks,block_size,0,vectorOfStreams[stream_id]>>> (N,qtd_threads_streams[stream_id],mat_d, &path_d[stream_id*chunk*nivelPreFixos], nivelPreFixos,999999, &sols_d[stream_id*chunk],&melhorSol_d[stream_id*chunk]); } for(int stream_id=0; stream_id<numStreams; stream_id++) cudaMemcpyAsync(&sols_h[stream_id*chunk],&sols_d[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), cudaMemcpyDeviceToHost,vectorOfStreams[stream_id]); for(int stream_id=0;stream_id<numStreams; stream_id++) cudaMemcpyAsync(&melhorSol_h[stream_id*chunk],&melhorSol_d[stream_id*chunk], qtd_threads_streams[stream_id]*sizeof(int), cudaMemcpyDeviceToHost,vectorOfStreams[stream_id]); cudaDeviceSynchronize(); for(int i = 0; i<nPreFixos; ++i){ qtd_sols_global+=sols_h[i]; 
if(melhorSol_h[i]<otimo_global) otimo_global = melhorSol_h[i]; } printf("\n\n\n\t niveis preenchidos: %d.\n",nivelPreFixos); printf("\t Numero de streams: %d.\n",numStreams); printf("\t Tamanho do stream: %d.\n",chunk); printf("\nQuantidade de solucoes encontradas: %d.", qtd_sols_global); printf("\n\tOtimo global: %d.\n\n", otimo_global); CUDA_CHECK_RETURN( cudaFree(mat_d)); CUDA_CHECK_RETURN( cudaFree(sols_d)); CUDA_CHECK_RETURN( cudaFree(path_d)); CUDA_CHECK_RETURN( cudaFree(melhorSol_d)); return otimo_global; } int main() { read(); int niveis = 5; printf("\n\nEnumeracao com streams:\n\n"); callCompleteEnumStreams(niveis); return 0; }
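/* A minimal sketch of the chunked multi-stream pattern used above, isolated
   from the TSP logic: split an array into fixed-size chunks, give each chunk
   its own stream, and queue copy-in / kernel / copy-out per stream. The
   scale() kernel and the sizes are illustrative assumptions. One design note:
   copies from pageable (malloc'd) host memory cannot overlap with kernel
   execution, so the sketch pins its host buffer with cudaMallocHost, which
   the enumeration above does not do. */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *data, int n, float factor) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

int main() {
    const int total = 1 << 20, chunk = 1 << 18;   // assumed sizes (4 chunks)
    const int numStreams = (total + chunk - 1) / chunk;

    float *h, *d;
    cudaMallocHost((void**)&h, total * sizeof(float));   // pinned, so copies can overlap
    cudaMalloc((void**)&d, total * sizeof(float));
    for (int i = 0; i < total; ++i) h[i] = 1.0f;

    cudaStream_t streams[8];                      // large enough for numStreams = 4 here
    for (int s = 0; s < numStreams; ++s) cudaStreamCreate(&streams[s]);

    for (int s = 0; s < numStreams; ++s) {
        int offset = s * chunk;
        int count  = (offset + chunk <= total) ? chunk : total - offset;
        cudaMemcpyAsync(d + offset, h + offset, count * sizeof(float),
                        cudaMemcpyHostToDevice, streams[s]);
        scale<<<(count + 255) / 256, 256, 0, streams[s]>>>(d + offset, count, 2.0f);
        cudaMemcpyAsync(h + offset, d + offset, count * sizeof(float),
                        cudaMemcpyDeviceToHost, streams[s]);
    }
    cudaDeviceSynchronize();

    printf("h[0] = %f (expect 2.0)\n", h[0]);
    for (int s = 0; s < numStreams; ++s) cudaStreamDestroy(streams[s]);
    cudaFreeHost(h); cudaFree(d);
    return 0;
}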
741b6f8e030258ba2706172b3a9850d8619f20d1.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include "support.h" void InitialGrid(int *grid, int height, int width) { int i, j; for(i=0;i<height;i++) for(j=0;j<width;j++) grid[0*width*height+i*width+j]=grid[1*width*height+i*width+j]=0; } void GiveLife(int flag,int n, int *grid,int height, int width) { int i; for(i=0;i<n;i++) grid[flag*height*width+(rand()%height)*width+rand()%width]=1; } Matrix allocateMatrix(unsigned height, unsigned width) { Matrix mat; mat.height = height; mat.width = mat.pitch = width; mat.elements = (float*)malloc(height*width*sizeof(float)); if(mat.elements == NULL) FATAL("Unable to allocate host"); return mat; } void initMatrix(Matrix mat) { for (unsigned int i=0; i < mat.height*mat.width; i++) { mat.elements[i] = (rand()%100)/100.00; } } int IsLocationValid_CPU(int x, int y,int width, int height) { if(x<0||y<0||x>=height||y>=width) return 0; else return 1; } int CountNeighbors_CPU(int flag,int x, int y, int width, int height,int *grid) { int count=0; int i, j; int range =3; for(i=-(range/2);i<=(range/2);i++) { for(j=-(range/2);j<=(range/2);j++) { if(i==0&&j==0) continue; if(IsLocationValid_CPU(x+i,y+j, width,height)==0) continue; if(grid[flag*width*height+(x+i)*width +y+j]==1) count++; } } return count; } void GameofLife_CPU( int *grid, int width, int height, int nowGrid) { int count; for(int i=0;i<height;i++) { for(int j=0;j<width;j++) { count=CountNeighbors_CPU(nowGrid,i,j,width,height,grid); if(grid[nowGrid*width*height+i*width+j]==0) { if(count==3) grid[(1-nowGrid)*width*height+i*width+j]=1; else grid[(1-nowGrid)*width*height+i*width+j]=0; } else { if(count<=1||count>=4) grid[(1-nowGrid)*width*height+i*width+j]=0; else grid[(1-nowGrid)*width*height+i*width+j]=1; } } } } Matrix allocateDeviceMatrix(unsigned height, unsigned width) { Matrix mat; hipError_t cuda_ret; mat.height = height; mat.width = mat.pitch = width; cuda_ret = hipMalloc((void**)&(mat.elements), height*width*sizeof(float)); if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory"); return mat; } void copyToDeviceMatrix(Matrix dst, Matrix src) { hipError_t cuda_ret; cuda_ret = hipMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy to device"); } void copyFromDeviceMatrix(Matrix dst, Matrix src) { hipError_t cuda_ret; cuda_ret = hipMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy from device"); } void verify(int *GPU_result, int *CPU_result, int height, int width) { for(int i=0;i<2*width*height; i++) { if(GPU_result[i]!=CPU_result[i]) { printf("TEST FAILED\n\n"); exit(0); } } printf("TEST PASSED\n\n"); } void freeMatrix(Matrix mat) { free(mat.elements); mat.elements = NULL; } void freeDeviceMatrix(Matrix mat) { hipFree(mat.elements); mat.elements = NULL; } void startTime(Timer* timer) { gettimeofday(&(timer->startTime), NULL); } void stopTime(Timer* timer) { gettimeofday(&(timer->endTime), NULL); } float elapsedTime(Timer timer) { return ((float) ((timer.endTime.tv_sec - timer.startTime.tv_sec) \ + (timer.endTime.tv_usec - timer.startTime.tv_usec)/1.0e6)); }
741b6f8e030258ba2706172b3a9850d8619f20d1.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdlib.h> #include <stdio.h> #include "support.h" void InitialGrid(int *grid, int height, int width) { int i, j; for(i=0;i<height;i++) for(j=0;j<width;j++) grid[0*width*height+i*width+j]=grid[1*width*height+i*width+j]=0; } void GiveLife(int flag,int n, int *grid,int height, int width) { int i; for(i=0;i<n;i++) grid[flag*height*width+(rand()%height)*width+rand()%width]=1; } Matrix allocateMatrix(unsigned height, unsigned width) { Matrix mat; mat.height = height; mat.width = mat.pitch = width; mat.elements = (float*)malloc(height*width*sizeof(float)); if(mat.elements == NULL) FATAL("Unable to allocate host"); return mat; } void initMatrix(Matrix mat) { for (unsigned int i=0; i < mat.height*mat.width; i++) { mat.elements[i] = (rand()%100)/100.00; } } int IsLocationValid_CPU(int x, int y,int width, int height) { if(x<0||y<0||x>=height||y>=width) return 0; else return 1; } int CountNeighbors_CPU(int flag,int x, int y, int width, int height,int *grid) { int count=0; int i, j; int range =3; for(i=-(range/2);i<=(range/2);i++) { for(j=-(range/2);j<=(range/2);j++) { if(i==0&&j==0) continue; if(IsLocationValid_CPU(x+i,y+j, width,height)==0) continue; if(grid[flag*width*height+(x+i)*width +y+j]==1) count++; } } return count; } void GameofLife_CPU( int *grid, int width, int height, int nowGrid) { int count; for(int i=0;i<height;i++) { for(int j=0;j<width;j++) { count=CountNeighbors_CPU(nowGrid,i,j,width,height,grid); if(grid[nowGrid*width*height+i*width+j]==0) { if(count==3) grid[(1-nowGrid)*width*height+i*width+j]=1; else grid[(1-nowGrid)*width*height+i*width+j]=0; } else { if(count<=1||count>=4) grid[(1-nowGrid)*width*height+i*width+j]=0; else grid[(1-nowGrid)*width*height+i*width+j]=1; } } } } Matrix allocateDeviceMatrix(unsigned height, unsigned width) { Matrix mat; cudaError_t cuda_ret; mat.height = height; mat.width = mat.pitch = width; cuda_ret = cudaMalloc((void**)&(mat.elements), height*width*sizeof(float)); if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory"); return mat; } void copyToDeviceMatrix(Matrix dst, Matrix src) { cudaError_t cuda_ret; cuda_ret = cudaMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy to device"); } void copyFromDeviceMatrix(Matrix dst, Matrix src) { cudaError_t cuda_ret; cuda_ret = cudaMemcpy(dst.elements, src.elements, src.height*src.width*sizeof(float), cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy from device"); } void verify(int *GPU_result, int *CPU_result, int height, int width) { for(int i=0;i<2*width*height; i++) { if(GPU_result[i]!=CPU_result[i]) { printf("TEST FAILED\n\n"); exit(0); } } printf("TEST PASSED\n\n"); } void freeMatrix(Matrix mat) { free(mat.elements); mat.elements = NULL; } void freeDeviceMatrix(Matrix mat) { cudaFree(mat.elements); mat.elements = NULL; } void startTime(Timer* timer) { gettimeofday(&(timer->startTime), NULL); } void stopTime(Timer* timer) { gettimeofday(&(timer->endTime), NULL); } float elapsedTime(Timer timer) { return ((float) ((timer.endTime.tv_sec - timer.startTime.tv_sec) \ + (timer.endTime.tv_usec - timer.startTime.tv_usec)/1.0e6)); }
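/* The support file above only ships the CPU reference (GameofLife_CPU); the GPU
   kernel it is verified against lives in a file not shown here. The kernel
   below is a hedged sketch of what that step could look like with the same
   layout, not the project's actual kernel: two generations packed into one int
   array, generation `nowGrid` read, generation 1-nowGrid written, one thread
   per cell. */
__global__ void GameofLife_GPU_sketch(int *grid, int width, int height, int nowGrid)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= height || col >= width) return;

    // Count the live neighbors of (row, col) in the current generation,
    // treating out-of-bounds cells as dead, mirroring CountNeighbors_CPU.
    int count = 0;
    for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
            if (i == 0 && j == 0) continue;
            int r = row + i, c = col + j;
            if (r < 0 || c < 0 || r >= height || c >= width) continue;
            count += grid[nowGrid * width * height + r * width + c];
        }
    }

    // Same birth/survival rule as the CPU loop: a dead cell with exactly 3
    // neighbors is born; a live cell with <=1 or >=4 neighbors dies.
    int alive = grid[nowGrid * width * height + row * width + col];
    int next  = alive ? (count >= 2 && count <= 3) : (count == 3);
    grid[(1 - nowGrid) * width * height + row * width + col] = next;
}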
8752ec58a83957b725b4f8adbb403ccdd0bb5271.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<time.h> #include<cuda_runtime.h> #define THREAD_NUM 256 #define MATRIX_SIZE 1000 int blocks_num = (MATRIX_SIZE * MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM; void generateMatrix(float * a, float *b) { int i; int SIZE = MATRIX_SIZE * MATRIX_SIZE; for (i = 0; i < SIZE; i ++) { a[i] = i; b[i] = i * i; } } __global__ static void CUDAkernal(const float *a ,const float *b, float *c, int n) { int i; //block threadID const int tid = threadIdx.x; //blockID const int bid = blockIdx.x; //threadID const int idx = bid * THREAD_NUM + tid; const int row = idx / n; const int column = idx % n; if (row < n && column < n) { float t = 0; for (i = 0; i < n; i++) { t += a[row * n + i] * b[i * n + column]; } c[row*n+column] = t; } } int main() { float *a, *b, *c; float *cuda_a, * cuda_b, * cuda_c; int n = MATRIX_SIZE; //CPU alloc a = (float*)malloc(sizeof(float) * n * n); b = (float*)malloc(sizeof(float) * n * n); c = (float*)malloc(sizeof(float) * n * n); //GPU alloc hipMalloc((void**)&cuda_a, sizeof(float) * n * n); hipMalloc((void**)&cuda_b, sizeof(float) * n * n); hipMalloc((void**)&cuda_c, sizeof(float) * n * n); generateMatrix(a, b); //Copy hipMemcpy(cuda_a, a, sizeof(float)*n*n, hipMemcpyHostToDevice); hipMemcpy(cuda_b, b, sizeof(float)*n*n, hipMemcpyHostToDevice); CUDAkernal << <blocks_num, THREAD_NUM, 0>> >(cuda_a, cuda_b, cuda_c, n); //Copy hipMemcpy(c, cuda_c, sizeof(float)*n*n, hipMemcpyDeviceToHost); hipFree(cuda_a); hipFree(cuda_b); hipFree(cuda_c); }
8752ec58a83957b725b4f8adbb403ccdd0bb5271.cu
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<cuda_runtime.h> #define THREAD_NUM 256 #define MATRIX_SIZE 1000 int blocks_num = (MATRIX_SIZE * MATRIX_SIZE + THREAD_NUM - 1) / THREAD_NUM; void generateMatrix(float * a, float *b) { int i; int SIZE = MATRIX_SIZE * MATRIX_SIZE; for (i = 0; i < SIZE; i ++) { a[i] = i; b[i] = i * i; } } __global__ static void CUDAkernal(const float *a ,const float *b, float *c, int n) { int i; //block threadID const int tid = threadIdx.x; //blockID const int bid = blockIdx.x; //threadID const int idx = bid * THREAD_NUM + tid; const int row = idx / n; const int column = idx % n; if (row < n && column < n) { float t = 0; for (i = 0; i < n; i++) { t += a[row * n + i] * b[i * n + column]; } c[row*n+column] = t; } } int main() { float *a, *b, *c; float *cuda_a, * cuda_b, * cuda_c; int n = MATRIX_SIZE; //CPU alloc a = (float*)malloc(sizeof(float) * n * n); b = (float*)malloc(sizeof(float) * n * n); c = (float*)malloc(sizeof(float) * n * n); //GPU alloc cudaMalloc((void**)&cuda_a, sizeof(float) * n * n); cudaMalloc((void**)&cuda_b, sizeof(float) * n * n); cudaMalloc((void**)&cuda_c, sizeof(float) * n * n); generateMatrix(a, b); //Copy cudaMemcpy(cuda_a, a, sizeof(float)*n*n, cudaMemcpyHostToDevice); cudaMemcpy(cuda_b, b, sizeof(float)*n*n, cudaMemcpyHostToDevice); CUDAkernal << <blocks_num, THREAD_NUM, 0>> >(cuda_a, cuda_b, cuda_c, n); //Copy cudaMemcpy(c, cuda_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost); cudaFree(cuda_a); cudaFree(cuda_b); cudaFree(cuda_c); }
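/* The kernel above (CUDAkernal) is the straightforward version: each thread
   walks a full row of a and a full column of b in global memory. A common
   refinement is to stage square tiles of both operands in shared memory so
   each element is read from global memory n/TILE times instead of n times.
   The kernel below is a hedged sketch of that alternative, not part of the
   program above; TILE = 16 is an assumed tile size. */
#define TILE 16

__global__ void matmul_tiled_sketch(const float *a, const float *b, float *c, int n)
{
    __shared__ float as[TILE][TILE];
    __shared__ float bs[TILE][TILE];

    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float acc = 0.0f;

    for (int t = 0; t < (n + TILE - 1) / TILE; ++t) {
        int aCol = t * TILE + threadIdx.x;   // column of a loaded by this thread
        int bRow = t * TILE + threadIdx.y;   // row of b loaded by this thread
        as[threadIdx.y][threadIdx.x] = (row < n && aCol < n) ? a[row * n + aCol] : 0.0f;
        bs[threadIdx.y][threadIdx.x] = (bRow < n && col < n) ? b[bRow * n + col] : 0.0f;
        __syncthreads();

        for (int k = 0; k < TILE; ++k)
            acc += as[threadIdx.y][k] * bs[k][threadIdx.x];
        __syncthreads();
    }

    if (row < n && col < n)
        c[row * n + col] = acc;
}

/* Launch sketch, using the pointers from the program above:
   dim3 block(TILE, TILE);
   dim3 grid((n + TILE - 1) / TILE, (n + TILE - 1) / TILE);
   matmul_tiled_sketch<<<grid, block>>>(cuda_a, cuda_b, cuda_c, n); */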
3af55bb0d4b75e0c4eeca6fce73ecce81a7bb596.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "intArrayAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *size = NULL; hipMalloc(&size, XSIZE*YSIZE); const int *input = NULL; hipMalloc(&input, XSIZE*YSIZE); int *output = NULL; hipMalloc(&output, XSIZE*YSIZE); const int *inFreeArray = NULL; hipMalloc(&inFreeArray, XSIZE*YSIZE); int *length = NULL; hipMalloc(&length, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( intArrayAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, size,input,output,inFreeArray,length); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( intArrayAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, size,input,output,inFreeArray,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( intArrayAdd), dim3(gridBlock),dim3(threadBlock), 0, 0, size,input,output,inFreeArray,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3af55bb0d4b75e0c4eeca6fce73ecce81a7bb596.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "intArrayAdd.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *size = NULL; cudaMalloc(&size, XSIZE*YSIZE); const int *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); int *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); const int *inFreeArray = NULL; cudaMalloc(&inFreeArray, XSIZE*YSIZE); int *length = NULL; cudaMalloc(&length, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); intArrayAdd<<<gridBlock,threadBlock>>>(size,input,output,inFreeArray,length); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { intArrayAdd<<<gridBlock,threadBlock>>>(size,input,output,inFreeArray,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { intArrayAdd<<<gridBlock,threadBlock>>>(size,input,output,inFreeArray,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
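In both versions of this harness the two while-loops only round XSIZE and YSIZE up to the next multiple of BLOCKX and BLOCKY before dividing. A hypothetical equivalent, makeGrid (not in the original harness), does the same with integer ceiling division:

// Hypothetical replacement for the grid-sizing while-loops above (same result, no loop).
#include <cuda_runtime.h>

static inline dim3 makeGrid(int xsize, int ysize, int blockx, int blocky) {
    // (x + b - 1) / b == ceil(x / b) for positive integers, which is what the
    // increment-until-divisible loops compute before dividing by the block size.
    return dim3((xsize + blockx - 1) / blockx,
                (ysize + blocky - 1) / blocky);
}

Used as dim3 gridBlock = makeGrid(XSIZE, YSIZE, BLOCKX, BLOCKY); it yields the same launch configuration as the loop version.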
a1ff93ab32be8d6b181890e1358e6551ae69e216.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include "cv.h" #include "highgui.h" #include "elas.h" #include <vector> #include "triangle.h" #include "matrix.h" #include <stdlib.h> using namespace std; #define WIDTH 320 #define HEIGH 240 #define GRID_SIZE 20 enum setting { ROBOTICS, MIDDLEBURY }; // parameter settings struct parameters { int32_t disp_min; // min disparity int32_t disp_max; // max disparity float support_threshold; // max. uniqueness ratio (best vs. second best support match) int32_t support_texture; // min texture for support points int32_t candidate_stepsize; // step size of regular grid on which support points are matched int32_t incon_window_size; // window size of inconsistent support point check int32_t incon_threshold; // disparity similarity threshold for support point to be considered consistent int32_t incon_min_support; // minimum number of consistent support points bool add_corners; // add support points at image corners with nearest neighbor disparities int32_t grid_size; // size of neighborhood for additional support point extrapolation float beta; // image likelihood parameter float gamma; // prior constant float sigma; // prior sigma float sradius; // prior sigma radius int32_t match_texture; // min texture for dense matching int32_t lr_threshold; // disparity threshold for left/right consistency check float speckle_sim_threshold; // similarity threshold for speckle segmentation int32_t speckle_size; // maximal size of a speckle (small speckles get removed) int32_t ipol_gap_width; // interpolate small gaps (left<->right, top<->bottom) bool filter_median; // optional median filter (approximated) bool filter_adaptive_mean; // optional adaptive mean filter (approximated) bool postprocess_only_left; // saves time by not postprocessing the right image bool subsampling; // saves time by only computing disparities for each 2nd pixel // note: for this option D1 and D2 must be passed with size // width/2 x height/2 (rounded towards zero) // constructor parameters(setting s = ROBOTICS) { // default settings in a robotics environment // (do not produce results in half-occluded areas // and are a bit more robust towards lighting etc.) 
//half-occluded if (s == ROBOTICS) { disp_min = 0; disp_max = 63; support_threshold = 0.85; support_texture = 10; candidate_stepsize = 5; incon_window_size = 5; incon_threshold = 5; incon_min_support = 5; add_corners = 0; grid_size = 20; beta = 0.02; gamma = 3; sigma = 1; sradius = 2; match_texture = 1; //dense matching lr_threshold = 2; // speckle_sim_threshold = 2; // speckle_size = 200; //size ipol_gap_width = 7; // filter_median = 0; filter_adaptive_mean = 0; postprocess_only_left = 1; subsampling = 0; // default settings for middlebury benchmark // (interpolate all missing disparities) middlebury } else { disp_min = 0; disp_max = 63; support_threshold = 0.85; support_texture = 10; candidate_stepsize = 5; incon_window_size = 5; incon_threshold = 5; incon_min_support = 5; add_corners = 1; grid_size = 20; beta = 0.02; gamma = 5; sigma = 1; sradius = 3; match_texture = 0; lr_threshold = 2; speckle_sim_threshold = 1; speckle_size = 200; ipol_gap_width = 5000; filter_median = 1; filter_adaptive_mean = 0; postprocess_only_left = 0; subsampling = 0; } } }; // parameter set parameters param(ROBOTICS); //static hipStream_t stream1, stream2, stream3, stream4; struct support_pt { int32_t u; int32_t v; int32_t d; support_pt(int32_t u, int32_t v, int32_t d) :u(u), v(v), d(d) {} }; struct support_pt1 { int32_t u; int32_t v; int32_t d; }; struct triangle { int32_t c1, c2, c3; float t1a, t1b, t1c; float t2a, t2b, t2c; triangle(int32_t c1, int32_t c2, int32_t c3) :c1(c1), c2(c2), c3(c3) {} }; struct triangle1 { int32_t c1, c2, c3; float t1a, t1b, t1c; float t2a, t2b, t2c; int32_t pointNum; }; struct plane { float t1a, t1b, t1c; float t2a; }; __device__ uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } inline uint32_t getAddressOffsetImage(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } __device__ unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset) { unsigned int a, b, c, e, r0, r4; a = abs(*(dst1 + offset) - *(dst2 + offset)) + abs(*(dst1 + offset + 1) - *(dst2 + offset + 1)); b = abs(*(dst1 + offset + 2) - *(dst2 + offset + 2)) + abs(*(dst1 + offset + 3) - *(dst2 + offset + 3)); c = abs(*(dst1 + offset + 4) - *(dst2 + offset + 4)) + abs(*(dst1 + offset + 5) - *(dst2 + offset + 5)); e = abs(*(dst1 + offset + 6) - *(dst2 + offset + 6)) + abs(*(dst1 + offset + 7) - *(dst2 + offset + 7)); r0 = a + b + c + e; a = abs(*(dst1 + offset + 8) - *(dst2 + offset + 8)) + abs(*(dst1 + offset + 9) - *(dst2 + offset + 9)); b = abs(*(dst1 + offset + 10) - *(dst2 + offset + 10)) + abs(*(dst1 + offset + 11) - *(dst2 + offset + 11)); c = abs(*(dst1 + offset + 12) - *(dst2 + offset + 12)) + abs(*(dst1 + offset + 13) - *(dst2 + offset + 13)); e = abs(*(dst1 + offset + 14) - *(dst2 + offset + 14)) + abs(*(dst1 + offset + 15) - *(dst2 + offset + 15)); r4 = a + b + c + e; return r0 + r4; } inline uint32_t getAddressOffsetGrid(const int32_t& x, const int32_t& y, const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0); if (val<min_val) { min_val = val; min_d = d; } } __device__ void 
updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int32_t &w, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0) + w; if (val<min_val) { min_val = val; min_d = d; } } int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } __constant__ int32_t grid_dims_g[3] = {65, WIDTH/GRID_SIZE, HEIGH/GRID_SIZE} ; __global__ void Triangle_Match1(triangle1* tri, int32_t* disparity_grid, int32_t *grid_dims,\ uint8_t* I1_desc, uint8_t* I2_desc, int32_t* P, \ int32_t plane_radius, bool right_image, float* D, \ int32_t* tp) { float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0; int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; int32_t id; __shared__ uint8_t I1_desc_share[320 * 16]; __shared__ uint8_t I2_desc_share[320 * 16]; for(int i = 0; i < 16; i += 1 ) { I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320]; I2_desc_share[u + i*320 ] = I2_desc[v * 320*16 + u + i*320]; } __syncthreads(); id = tp[2 * u + v * 2 * WIDTH + 1]; plane_a = tri[id].t1a; plane_b = tri[id].t1b; plane_c = tri[id].t1c; plane_d = tri[id].t2a; bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7; // get image width and height const int32_t disp_num = grid_dims_g[0] - 1; // const int32_t disp_num = grid_dims[0] - 1; const int32_t window_size = 2; // address of disparity we want to compute uint32_t d_addr; d_addr = getAddressOffsetImage1(u, v, WIDTH); // compute line start address int32_t line_offset = 16 * WIDTH*max(min(v, HEIGH - 3), 2); uint8_t *I1_line_addr, *I2_line_addr; // I1_line_addr = I1_desc + line_offset; // I2_line_addr = I2_desc + line_offset; // uint8_t* I1_block_addr = I1_line_addr + 16 * u; I2_line_addr = I2_desc_share ; uint8_t* I1_block_addr = I1_desc_share + 16 * u; // does this patch have enough texture? 
int32_t sum = 0; //int32_t match_texture = 1; // //#pragma unroll // for (int32_t i = 0; i<16; i++) // sum += abs((int32_t)(*(I1_block_addr + i)) - 127); // if (sum<match_texture) // return; // compute disparity, min disparity and max disparity of plane prior // int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c); int32_t d_plane = (int32_t)(0); int32_t d_plane_min = max(d_plane - plane_radius, 0); int32_t d_plane_max = min(d_plane + plane_radius, disp_num - 1); // get grid pointer int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE); int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE); uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims[1], grid_dims[0]); int32_t num_grid = *(disparity_grid + grid_addr); int32_t* d_grid = disparity_grid + grid_addr + 1; // uint32_t grid_addr = grid_x * grid_dims_g[0]; // int32_t num_grid = *(disparity_grid_g + grid_addr); // int32_t* d_grid = disparity_grid_g + grid_addr + 1; // loop variables int32_t d_curr, u_warp, val; int32_t min_val = 10000; int32_t min_d = -1; // left image if (!right_image) { //#pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u - d_curr; // if (u_warp<window_size || u_warp >= WIDTH - window_size) // continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); // updatePosteriorMinimumNew(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, val, min_val, min_d); } } //#pragma unroll for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u - d_curr; // if (u_warp<window_size || u_warp >= WIDTH - window_size) // continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,valid?*(P+abs(d_curr-d_plane)):0,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); // updatePosteriorMinimumNew1(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } // right image } else { //#pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u + d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); // updatePosteriorMinimumNew(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, val, min_val, min_d); } } //#pragma unroll for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u + d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,valid?*(P+abs(d_curr-d_plane)):0,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); // updatePosteriorMinimumNew1(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, valid ? 
*(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } // set disparity value if (min_d >= 0) *(D + d_addr) = min_d; // MAP value (min neg-Log probability) else *(D + d_addr) = -1; // invalid disparity } //void computeTrianglePoints(support_pt1* p_support, triangle1* tri, bool right_image, int32_t width, int32_t TRI_SIZE, int32_t* tp) { void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \ bool right_image, int32_t width, int32_t TRI_SIZE, int32_t* tp) { // loop variables int32_t c1, c2, c3; // float plane_a, plane_b, plane_c, plane_d; // for all triangles do for (uint32_t i = 0; i<TRI_SIZE; i++) { int num = 0; // get plane parameters uint32_t p_i = i * 3; // triangle corners c1 = tri[i].c1; c2 = tri[i].c2; c3 = tri[i].c3; // sort triangle corners wrt. u (ascending) float tri_u[3]; if (!right_image) { // tri_u[0] = p_support[c1].u; tri_u[1] = p_support[c2].u; tri_u[2] = p_support[c3].u; } else { // tri_u[0] = p_support[c1].u - p_support[c1].d; tri_u[1] = p_support[c2].u - p_support[c2].d; tri_u[2] = p_support[c3].u - p_support[c3].d; } float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v }; for (uint32_t j = 0; j<3; j++) { for (uint32_t k = 0; k<j; k++) { if (tri_u[k]>tri_u[j]) { float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp; float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp; } } } // rename corners float A_u = tri_u[0]; float A_v = tri_v[0]; float B_u = tri_u[1]; float B_v = tri_v[1]; float C_u = tri_u[2]; float C_v = tri_v[2]; // compute straight lines connecting triangle corners float AB_a = 0; float AC_a = 0; float BC_a = 0; if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u); if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u); if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u); float AB_b = A_v - AB_a*A_u; float AC_b = A_v - AC_a*A_u; float BC_b = B_v - BC_a*B_u; // first part (triangle corner A->B) if ((int32_t)(A_u) != (int32_t)(B_u)) { for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) { if (!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; *(tp + 2 * u + v * 2 * width + 1) = i; // num++; } } } } // second part (triangle corner B->C) if ((int32_t)(B_u) != (int32_t)(C_u)) { for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); u++) { if (!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; *(tp + 2 * u + v * 2 * width + 1) = i; // num++; } } } } // tri[i].pointNum = num; } } int32_t width, height, bpl; uint8_t* I_desc1 = NULL; uint8_t* I_desc2 = NULL; int32_t* grid_dims_gpu = NULL; int32_t* disparity_grid_gpu_1 = NULL; int32_t* disparity_grid_gpu_2 = NULL; float* D1_gpu = NULL; float* D2_gpu = NULL; int32_t* P_gpu = NULL; triangle1* tri_gpu_1, *tri_gpu_2; plane *plane_1, *plane_2; plane *plane_g1, *plane_g2; hipError_t err; int32_t dims[3] = {WIDTH,HEIGH,WIDTH}; static int flag = 1; void cuda_computeD(int32_t* disparity_grid_1, 
int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ float* D1, float* D2, uint8_t* I1, uint8_t* I2) { clock_t t1, t2; // get width, height and bytes per line width = dims[0]; //715*492 height = dims[1]; bpl = width + 15 - (width - 1) % 16; //720 // allocate memory for disparity grid int32_t grid_width = (int32_t)ceil((float)width / (float)20); int32_t grid_height = (int32_t)ceil((float)height / (float)20); int32_t grid_dims[3] = { 63 + 2,grid_width,grid_height }; // grid_dims[3] = { 63 + 2,grid_width,grid_height }; int32_t P_SUPPORT_SIZE = p_support.size(); int32_t TRI_SIZE1 = tri_1.size(); int32_t TRI_SIZE2 = tri_2.size(); int32_t* tp1_cpu, *tp2_cpu; int32_t *tp1_gpu, *tp2_gpu; cout<<"P_SUPPORT_SIZE: "<<P_SUPPORT_SIZE<<endl; cout<< "TRI_SIZE1: " << TRI_SIZE1 <<endl; cout<< "TRI_SIZE2: " << TRI_SIZE2 <<endl; if(1 == flag){ hipMalloc((void **)&grid_dims_gpu, sizeof(int32_t) * 3); hipMalloc((void **)&disparity_grid_gpu_1, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width); hipMalloc((void **)&disparity_grid_gpu_2, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width); } if(1 == flag){ hipMalloc((void **)&tri_gpu_1, sizeof(triangle1) * TRI_SIZE1); hipMalloc((void **)&tri_gpu_2, sizeof(triangle1) * TRI_SIZE2); hipMalloc((void **)&D1_gpu, sizeof(float) * width * height); hipMalloc((void **)&D2_gpu, sizeof(float) * width * height); hipMalloc((void **)&P_gpu, sizeof(int32_t) * width * height); hipMalloc((void **)&I_desc1, 16 * width*height * sizeof(uint8_t)); hipMalloc((void **)&I_desc2, 16 * width*height * sizeof(uint8_t)); } flag = 0; tp2_cpu = (int32_t*)malloc(sizeof(int32_t) * width * height * 2); tp1_cpu = (int32_t*)malloc(sizeof(int32_t) * width * height * 2); for (int j = 0; j < height; j++) { for (int i = 0; i < width * 2; i++) { tp1_cpu[i + j * width * 2] = -1; tp2_cpu[i + j * width * 2] = -1; } } t1 = clock(); computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_cpu); computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_cpu); t2 = clock(); printf("computeTripoints : %ldms\n", (t2 - t1)/1000); //if(1 == flag){ hipMalloc((void **)&tp1_gpu, sizeof(int32_t) * width * height * 2); hipMalloc((void **)&tp2_gpu, sizeof(int32_t) * width * height * 2); hipMemcpy(tp1_gpu, tp1_cpu, sizeof(int32_t) * width * height * 2, hipMemcpyHostToDevice); hipMemcpy(tp2_gpu, tp2_cpu, sizeof(int32_t) * width * height * 2, hipMemcpyHostToDevice); hipMemcpy(grid_dims_gpu, grid_dims, sizeof(int32_t) * 3, hipMemcpyHostToDevice); hipMemcpy(disparity_grid_gpu_1, disparity_grid_1, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width, hipMemcpyHostToDevice); hipMemcpy(disparity_grid_gpu_2, disparity_grid_2, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width, hipMemcpyHostToDevice); // init disparity image to -10 if (param.subsampling) { for (int32_t i = 0; i < (width / 2)*(height / 2); i++) { *(D1 + i) = -10; *(D2 + i) = -10; } } else { for (int32_t i = 0; i < width*height; i++) { *(D1 + i) = -10; *(D2 + i) = -10; } } hipMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, hipMemcpyHostToDevice); hipMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, hipMemcpyHostToDevice); hipMemcpy(D1_gpu, D1, sizeof(float) * width * height, hipMemcpyHostToDevice); hipMemcpy(D2_gpu, D2, sizeof(float) * width * height, hipMemcpyHostToDevice); hipMemcpy(I_desc1, I1, 16 * width*height * sizeof(uint8_t), hipMemcpyHostToDevice); hipMemcpy(I_desc2, I2, 16 * 
width*height * sizeof(uint8_t), hipMemcpyHostToDevice); // number of disparities const int32_t disp_num = grid_dims[0] - 1; // descriptor window_size int32_t window_size = 2; // pre-compute prior float two_sigma_squared = 2 * param.sigma*param.sigma; int32_t* P = new int32_t[disp_num]; for (int32_t delta_d = 0; delta_d<disp_num; delta_d++) P[delta_d] = (int32_t)((-log(param.gamma + exp(-delta_d*delta_d / two_sigma_squared)) + log(param.gamma)) / param.beta); int32_t plane_radius = (int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0); //plane_radius = 2; hipMemcpy(P_gpu, P, sizeof(int32_t) * disp_num, hipMemcpyHostToDevice); //bool subsampling = param.subsampling; //int32_t match_texture = param.match_texture; //int32_t grid_size = param.grid_size; dim3 threads(320, 1); dim3 grid(iDivUp(width, (threads.x)), iDivUp(height,threads.y)); printf("goin Triangle_match kernel\n"); t1 = clock(); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_1, disparity_grid_gpu_1, \ grid_dims_gpu, I_desc1, I_desc2, P_gpu, plane_radius, 0, D1_gpu, \ tp1_gpu); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_2, disparity_grid_gpu_2, \ grid_dims_gpu, I_desc2, I_desc1, P_gpu, plane_radius, 1, D2_gpu, \ tp2_gpu); hipMemcpy(D1, D1_gpu, sizeof(float) * width * height, hipMemcpyDeviceToHost); hipMemcpy(D2, D2_gpu, sizeof(float) * width * height, hipMemcpyDeviceToHost); t2 = clock(); printf("Triangle_Match1 : %ldms\n", (t2 - t1)/1000); //hipDeviceReset(); //hipFree(D1_gpu); //hipFree(D2_gpu); //hipFree(P_gpu); //hipFree(I_desc1); //hipFree(I_desc2); //hipFree(tp1_gpu); //hipFree(tp2_gpu); //hipFree(grid_dims_gpu); //hipFree(disparity_grid_gpu_1); //hipFree(disparity_grid_gpu_2); //hipFree(plane_g1); //hipFree(plane_g2); } //err = hipFuncSetCacheConfig(Triangle_Match1,hipFuncCachePreferL1); //if(hipSuccess != err) //{ // printf("hipFuncSetCacheConfig error %s\n", hipGetErrorString(err)); //} //Triangle_Match1 << <1, 715*492>> > (tri_gpu_1, disparity_grid_gpu_1, \ // grid_dims_gpu, I_desc1, I_desc2, P_gpu, plane_radius, 0, D1_gpu, width, height, TRI_SIZE1, subsampling, \ // match_texture, grid_size, tp1_gpu); //for(int i = 0; i< 10000 ; i++) //{ // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; //} // for(int i = 10000; i< 11000 ; i++) // { // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; // } //printf("over memcpy\n"); // err = hipGetLastError(); //if(hipSuccess != err) //{ // printf("error %s\n", hipGetErrorString(err)); //} // for(int i = 7000; i< 8000 ; i++) // { // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; // } //int main() //{ // cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ // vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ // float* D1, float* D2,uint8_t* I1, uint8_t* I2, int dim); // return 0; //}
a1ff93ab32be8d6b181890e1358e6551ae69e216.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> #include "cv.h" #include "highgui.h" #include "elas.h" #include <vector> #include "triangle.h" #include "matrix.h" #include <stdlib.h> using namespace std; #define WIDTH 320 #define HEIGH 240 #define GRID_SIZE 20 enum setting { ROBOTICS, MIDDLEBURY }; // parameter settings struct parameters { int32_t disp_min; // min disparity int32_t disp_max; // max disparity float support_threshold; // max. uniqueness ratio (best vs. second best support match), i.e. maximum disparity uniqueness percentage int32_t support_texture; // min texture for support points int32_t candidate_stepsize; // step size of regular grid on which support points are matched int32_t incon_window_size; // window size of inconsistent support point check int32_t incon_threshold; // disparity similarity threshold for support point to be considered consistent int32_t incon_min_support; // minimum number of consistent support points bool add_corners; // add support points at image corners with nearest neighbor disparities int32_t grid_size; // size of neighborhood for additional support point extrapolation float beta; // image likelihood parameter float gamma; // prior constant float sigma; // prior sigma float sradius; // prior sigma radius int32_t match_texture; // min texture for dense matching int32_t lr_threshold; // disparity threshold for left/right consistency check float speckle_sim_threshold; // similarity threshold for speckle segmentation int32_t speckle_size; // maximal size of a speckle (small speckles get removed) int32_t ipol_gap_width; // interpolate small gaps (left<->right, top<->bottom) bool filter_median; // optional median filter (approximated) bool filter_adaptive_mean; // optional adaptive mean filter (approximated) bool postprocess_only_left; // saves time by not postprocessing the right image bool subsampling; // saves time by only computing disparities for each 2nd pixel // note: for this option D1 and D2 must be passed with size // width/2 x height/2 (rounded towards zero) // constructor parameters(setting s = ROBOTICS) { // default settings in a robotics environment // (do not produce results in half-occluded areas // and are a bit more robust towards lighting etc.) 
//default settings are for an experimental (robotics) environment; they cannot be used in half-occluded areas or under heavy lighting variation if (s == ROBOTICS) { disp_min = 0; disp_max = 63; support_threshold = 0.85; support_texture = 10; candidate_stepsize = 5; incon_window_size = 5; incon_threshold = 5; incon_min_support = 5; add_corners = 0; grid_size = 20; beta = 0.02; gamma = 3; sigma = 1; sradius = 2; match_texture = 1; //minimum texture for dense matching lr_threshold = 2; //consistency check threshold speckle_sim_threshold = 2; //similarity segmentation threshold for removing small speckles speckle_size = 200; //size limit for removing small speckles ipol_gap_width = 7; //gap interpolation width threshold filter_median = 0; filter_adaptive_mean = 0; postprocess_only_left = 1; subsampling = 0; // default settings for middlebury benchmark // (interpolate all missing disparities) Middlebury benchmark: interpolate all missing disparities } else { disp_min = 0; disp_max = 63; support_threshold = 0.85; support_texture = 10; candidate_stepsize = 5; incon_window_size = 5; incon_threshold = 5; incon_min_support = 5; add_corners = 1; grid_size = 20; beta = 0.02; gamma = 5; sigma = 1; sradius = 3; match_texture = 0; lr_threshold = 2; speckle_sim_threshold = 1; speckle_size = 200; ipol_gap_width = 5000; filter_median = 1; filter_adaptive_mean = 0; postprocess_only_left = 0; subsampling = 0; } } }; // parameter set parameters param(ROBOTICS); //static cudaStream_t stream1, stream2, stream3, stream4; struct support_pt { int32_t u; int32_t v; int32_t d; support_pt(int32_t u, int32_t v, int32_t d) :u(u), v(v), d(d) {} }; struct support_pt1 { int32_t u; int32_t v; int32_t d; }; struct triangle { int32_t c1, c2, c3; float t1a, t1b, t1c; float t2a, t2b, t2c; triangle(int32_t c1, int32_t c2, int32_t c3) :c1(c1), c2(c2), c3(c3) {} }; struct triangle1 { int32_t c1, c2, c3; float t1a, t1b, t1c; float t2a, t2b, t2c; int32_t pointNum; }; struct plane { float t1a, t1b, t1c; float t2a; }; __device__ uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } inline uint32_t getAddressOffsetImage(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } __device__ unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset) { unsigned int a, b, c, e, r0, r4; a = abs(*(dst1 + offset) - *(dst2 + offset)) + abs(*(dst1 + offset + 1) - *(dst2 + offset + 1)); b = abs(*(dst1 + offset + 2) - *(dst2 + offset + 2)) + abs(*(dst1 + offset + 3) - *(dst2 + offset + 3)); c = abs(*(dst1 + offset + 4) - *(dst2 + offset + 4)) + abs(*(dst1 + offset + 5) - *(dst2 + offset + 5)); e = abs(*(dst1 + offset + 6) - *(dst2 + offset + 6)) + abs(*(dst1 + offset + 7) - *(dst2 + offset + 7)); r0 = a + b + c + e; a = abs(*(dst1 + offset + 8) - *(dst2 + offset + 8)) + abs(*(dst1 + offset + 9) - *(dst2 + offset + 9)); b = abs(*(dst1 + offset + 10) - *(dst2 + offset + 10)) + abs(*(dst1 + offset + 11) - *(dst2 + offset + 11)); c = abs(*(dst1 + offset + 12) - *(dst2 + offset + 12)) + abs(*(dst1 + offset + 13) - *(dst2 + offset + 13)); e = abs(*(dst1 + offset + 14) - *(dst2 + offset + 14)) + abs(*(dst1 + offset + 15) - *(dst2 + offset + 15)); r4 = a + b + c + e; return r0 + r4; } inline uint32_t getAddressOffsetGrid(const int32_t& x, const int32_t& y, const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, 
dst2, 0); if (val<min_val) { min_val = val; min_d = d; } } __device__ void updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int32_t &w, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0) + w; if (val<min_val) { min_val = val; min_d = d; } } int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); } __constant__ int32_t grid_dims_g[3] = {65, WIDTH/GRID_SIZE, HEIGH/GRID_SIZE} ; __global__ void Triangle_Match1(triangle1* tri, int32_t* disparity_grid, int32_t *grid_dims,\ uint8_t* I1_desc, uint8_t* I2_desc, int32_t* P, \ int32_t plane_radius, bool right_image, float* D, \ int32_t* tp) { float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0; int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; int32_t id; __shared__ uint8_t I1_desc_share[320 * 16]; __shared__ uint8_t I2_desc_share[320 * 16]; for(int i = 0; i < 16; i += 1 ) { I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320]; I2_desc_share[u + i*320 ] = I2_desc[v * 320*16 + u + i*320]; } __syncthreads(); id = tp[2 * u + v * 2 * WIDTH + 1]; plane_a = tri[id].t1a; plane_b = tri[id].t1b; plane_c = tri[id].t1c; plane_d = tri[id].t2a; bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7; // get image width and height const int32_t disp_num = grid_dims_g[0] - 1; // const int32_t disp_num = grid_dims[0] - 1; const int32_t window_size = 2; // address of disparity we want to compute uint32_t d_addr; d_addr = getAddressOffsetImage1(u, v, WIDTH); // compute line start address int32_t line_offset = 16 * WIDTH*max(min(v, HEIGH - 3), 2); uint8_t *I1_line_addr, *I2_line_addr; // I1_line_addr = I1_desc + line_offset; // I2_line_addr = I2_desc + line_offset; // uint8_t* I1_block_addr = I1_line_addr + 16 * u; I2_line_addr = I2_desc_share ; uint8_t* I1_block_addr = I1_desc_share + 16 * u; // does this patch have enough texture? 
int32_t sum = 0; //int32_t match_texture = 1; // //#pragma unroll // for (int32_t i = 0; i<16; i++) // sum += abs((int32_t)(*(I1_block_addr + i)) - 127); // if (sum<match_texture) // return; // compute disparity, min disparity and max disparity of plane prior // int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c); int32_t d_plane = (int32_t)(0); int32_t d_plane_min = max(d_plane - plane_radius, 0); int32_t d_plane_max = min(d_plane + plane_radius, disp_num - 1); // get grid pointer int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE); int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE); uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims[1], grid_dims[0]); int32_t num_grid = *(disparity_grid + grid_addr); int32_t* d_grid = disparity_grid + grid_addr + 1; // uint32_t grid_addr = grid_x * grid_dims_g[0]; // int32_t num_grid = *(disparity_grid_g + grid_addr); // int32_t* d_grid = disparity_grid_g + grid_addr + 1; // loop variables int32_t d_curr, u_warp, val; int32_t min_val = 10000; int32_t min_d = -1; // left image if (!right_image) { //#pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u - d_curr; // if (u_warp<window_size || u_warp >= WIDTH - window_size) // continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); // updatePosteriorMinimumNew(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, val, min_val, min_d); } } //#pragma unroll for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u - d_curr; // if (u_warp<window_size || u_warp >= WIDTH - window_size) // continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,valid?*(P+abs(d_curr-d_plane)):0,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); // updatePosteriorMinimumNew1(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } // right image } else { //#pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u + d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); // updatePosteriorMinimumNew(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, val, min_val, min_d); } } //#pragma unroll for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u + d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; // updatePosteriorMinimum((__m128i*)(I2_line_addr+16*u_warp),d_curr,valid?*(P+abs(d_curr-d_plane)):0,xmm1,xmm2,val,min_val,min_d); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); // updatePosteriorMinimumNew1(I1_block_addr, I2_desc_share + 16 * u_warp, d_curr, valid ? 
*(P + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } // set disparity value if (min_d >= 0) *(D + d_addr) = min_d; // MAP value (min neg-Log probability) else *(D + d_addr) = -1; // invalid disparity } //void computeTrianglePoints(support_pt1* p_support, triangle1* tri, bool right_image, int32_t width, int32_t TRI_SIZE, int32_t* tp) { void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \ bool right_image, int32_t width, int32_t TRI_SIZE, int32_t* tp) { // loop variables int32_t c1, c2, c3; // float plane_a, plane_b, plane_c, plane_d; // for all triangles do for (uint32_t i = 0; i<TRI_SIZE; i++) { int num = 0; // get plane parameters uint32_t p_i = i * 3; // triangle corners c1 = tri[i].c1; c2 = tri[i].c2; c3 = tri[i].c3; // sort triangle corners wrt. u (ascending) float tri_u[3]; if (!right_image) { //left image tri_u[0] = p_support[c1].u; tri_u[1] = p_support[c2].u; tri_u[2] = p_support[c3].u; } else { //right image tri_u[0] = p_support[c1].u - p_support[c1].d; tri_u[1] = p_support[c2].u - p_support[c2].d; tri_u[2] = p_support[c3].u - p_support[c3].d; } float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v }; for (uint32_t j = 0; j<3; j++) { for (uint32_t k = 0; k<j; k++) { if (tri_u[k]>tri_u[j]) { float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp; float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp; } } } // rename corners float A_u = tri_u[0]; float A_v = tri_v[0]; float B_u = tri_u[1]; float B_v = tri_v[1]; float C_u = tri_u[2]; float C_v = tri_v[2]; // compute straight lines connecting triangle corners float AB_a = 0; float AC_a = 0; float BC_a = 0; if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u); if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u); if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u); float AB_b = A_v - AB_a*A_u; float AC_b = A_v - AC_a*A_u; float BC_b = B_v - BC_a*B_u; // first part (triangle corner A->B) if ((int32_t)(A_u) != (int32_t)(B_u)) { for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) { if (!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; *(tp + 2 * u + v * 2 * width + 1) = i; // num++; } } } } // second part (triangle corner B->C) if ((int32_t)(B_u) != (int32_t)(C_u)) { for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); u++) { if (!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; *(tp + 2 * u + v * 2 * width + 1) = i; // num++; } } } } // tri[i].pointNum = num; } } int32_t width, height, bpl; uint8_t* I_desc1 = NULL; uint8_t* I_desc2 = NULL; int32_t* grid_dims_gpu = NULL; int32_t* disparity_grid_gpu_1 = NULL; int32_t* disparity_grid_gpu_2 = NULL; float* D1_gpu = NULL; float* D2_gpu = NULL; int32_t* P_gpu = NULL; triangle1* tri_gpu_1, *tri_gpu_2; plane *plane_1, *plane_2; plane *plane_g1, *plane_g2; cudaError_t err; int32_t dims[3] = {WIDTH,HEIGH,WIDTH}; static int flag = 1; void cuda_computeD(int32_t* 
disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ float* D1, float* D2, uint8_t* I1, uint8_t* I2) { clock_t t1, t2; // get width, height and bytes per line width = dims[0]; //715*492 height = dims[1]; bpl = width + 15 - (width - 1) % 16; //720 // allocate memory for disparity grid int32_t grid_width = (int32_t)ceil((float)width / (float)20); int32_t grid_height = (int32_t)ceil((float)height / (float)20); int32_t grid_dims[3] = { 63 + 2,grid_width,grid_height }; // grid_dims[3] = { 63 + 2,grid_width,grid_height }; int32_t P_SUPPORT_SIZE = p_support.size(); int32_t TRI_SIZE1 = tri_1.size(); int32_t TRI_SIZE2 = tri_2.size(); int32_t* tp1_cpu, *tp2_cpu; int32_t *tp1_gpu, *tp2_gpu; cout<<"P_SUPPORT_SIZE: "<<P_SUPPORT_SIZE<<endl; cout<< "TRI_SIZE1: " << TRI_SIZE1 <<endl; cout<< "TRI_SIZE2: " << TRI_SIZE2 <<endl; if(1 == flag){ cudaMalloc((void **)&grid_dims_gpu, sizeof(int32_t) * 3); cudaMalloc((void **)&disparity_grid_gpu_1, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width); cudaMalloc((void **)&disparity_grid_gpu_2, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width); } if(1 == flag){ cudaMalloc((void **)&tri_gpu_1, sizeof(triangle1) * TRI_SIZE1); cudaMalloc((void **)&tri_gpu_2, sizeof(triangle1) * TRI_SIZE2); cudaMalloc((void **)&D1_gpu, sizeof(float) * width * height); cudaMalloc((void **)&D2_gpu, sizeof(float) * width * height); cudaMalloc((void **)&P_gpu, sizeof(int32_t) * width * height); cudaMalloc((void **)&I_desc1, 16 * width*height * sizeof(uint8_t)); cudaMalloc((void **)&I_desc2, 16 * width*height * sizeof(uint8_t)); } flag = 0; tp2_cpu = (int32_t*)malloc(sizeof(int32_t) * width * height * 2); tp1_cpu = (int32_t*)malloc(sizeof(int32_t) * width * height * 2); for (int j = 0; j < height; j++) { for (int i = 0; i < width * 2; i++) { tp1_cpu[i + j * width * 2] = -1; tp2_cpu[i + j * width * 2] = -1; } } t1 = clock(); computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_cpu); computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_cpu); t2 = clock(); printf("computeTripoints : %ldms\n", (t2 - t1)/1000); //if(1 == flag){ cudaMalloc((void **)&tp1_gpu, sizeof(int32_t) * width * height * 2); cudaMalloc((void **)&tp2_gpu, sizeof(int32_t) * width * height * 2); cudaMemcpy(tp1_gpu, tp1_cpu, sizeof(int32_t) * width * height * 2, cudaMemcpyHostToDevice); cudaMemcpy(tp2_gpu, tp2_cpu, sizeof(int32_t) * width * height * 2, cudaMemcpyHostToDevice); cudaMemcpy(grid_dims_gpu, grid_dims, sizeof(int32_t) * 3, cudaMemcpyHostToDevice); cudaMemcpy(disparity_grid_gpu_1, disparity_grid_1, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width, cudaMemcpyHostToDevice); cudaMemcpy(disparity_grid_gpu_2, disparity_grid_2, sizeof(int32_t) * (param.disp_max + 2) * grid_height * grid_width, cudaMemcpyHostToDevice); // init disparity image to -10 if (param.subsampling) { for (int32_t i = 0; i < (width / 2)*(height / 2); i++) { *(D1 + i) = -10; *(D2 + i) = -10; } } else { for (int32_t i = 0; i < width*height; i++) { *(D1 + i) = -10; *(D2 + i) = -10; } } cudaMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, cudaMemcpyHostToDevice); cudaMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, cudaMemcpyHostToDevice); cudaMemcpy(D1_gpu, D1, sizeof(float) * width * height, cudaMemcpyHostToDevice); cudaMemcpy(D2_gpu, D2, sizeof(float) * width * height, cudaMemcpyHostToDevice); cudaMemcpy(I_desc1, I1, 16 * width*height * sizeof(uint8_t), 
cudaMemcpyHostToDevice); cudaMemcpy(I_desc2, I2, 16 * width*height * sizeof(uint8_t), cudaMemcpyHostToDevice); // number of disparities const int32_t disp_num = grid_dims[0] - 1; // descriptor window_size int32_t window_size = 2; // pre-compute prior float two_sigma_squared = 2 * param.sigma*param.sigma; int32_t* P = new int32_t[disp_num]; for (int32_t delta_d = 0; delta_d<disp_num; delta_d++) P[delta_d] = (int32_t)((-log(param.gamma + exp(-delta_d*delta_d / two_sigma_squared)) + log(param.gamma)) / param.beta); int32_t plane_radius = (int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0); //plane_radius = 2; cudaMemcpy(P_gpu, P, sizeof(int32_t) * disp_num, cudaMemcpyHostToDevice); //bool subsampling = param.subsampling; //int32_t match_texture = param.match_texture; //int32_t grid_size = param.grid_size; dim3 threads(320, 1); dim3 grid(iDivUp(width, (threads.x)), iDivUp(height,threads.y)); printf("goin Triangle_match kernel\n"); t1 = clock(); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_1, disparity_grid_gpu_1, \ grid_dims_gpu, I_desc1, I_desc2, P_gpu, plane_radius, 0, D1_gpu, \ tp1_gpu); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_2, disparity_grid_gpu_2, \ grid_dims_gpu, I_desc2, I_desc1, P_gpu, plane_radius, 1, D2_gpu, \ tp2_gpu); cudaMemcpy(D1, D1_gpu, sizeof(float) * width * height, cudaMemcpyDeviceToHost); cudaMemcpy(D2, D2_gpu, sizeof(float) * width * height, cudaMemcpyDeviceToHost); t2 = clock(); printf("Triangle_Match1 : %ldms\n", (t2 - t1)/1000); //cudaThreadExit(); //cudaFree(D1_gpu); //cudaFree(D2_gpu); //cudaFree(P_gpu); //cudaFree(I_desc1); //cudaFree(I_desc2); //cudaFree(tp1_gpu); //cudaFree(tp2_gpu); //cudaFree(grid_dims_gpu); //cudaFree(disparity_grid_gpu_1); //cudaFree(disparity_grid_gpu_2); //cudaFree(plane_g1); //cudaFree(plane_g2); } //err = cudaFuncSetCacheConfig(Triangle_Match1,cudaFuncCachePreferL1); //if(cudaSuccess != err) //{ // printf("cudaFuncSetCacheConfig error %s\n", cudaGetErrorString(err)); //} //Triangle_Match1 << <1, 715*492>> > (tri_gpu_1, disparity_grid_gpu_1, \ // grid_dims_gpu, I_desc1, I_desc2, P_gpu, plane_radius, 0, D1_gpu, width, height, TRI_SIZE1, subsampling, \ // match_texture, grid_size, tp1_gpu); //for(int i = 0; i< 10000 ; i++) //{ // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; //} // for(int i = 10000; i< 11000 ; i++) // { // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; // } //printf("over memcpy\n"); // err = cudaGetLastError(); //if(cudaSuccess != err) //{ // printf("error %s\n", cudaGetErrorString(err)); //} // for(int i = 7000; i< 8000 ; i++) // { // //cout <<I1+i<<" "; // printf("%d ", *(D1+i)); // if(i%20 == 0) // cout<<endl; // } //int main() //{ // cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ // vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ // float* D1, float* D2,uint8_t* I1, uint8_t* I2, int dim); // return 0; //}
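Both versions of this pair launch Triangle_Match1 twice and rely on the following memcpys for synchronization; the error check is left commented out near the end of the file. A hypothetical checked-launch helper for the CUDA side, checkKernel (not part of the original), in the spirit of that commented-out cudaGetLastError block:

// Hypothetical launch check (the original keeps a similar cudaGetLastError block
// commented out): report launch-time errors first, then execution-time faults,
// before trusting the D1/D2 copies back to the host.
#include <cuda_runtime.h>
#include <stdio.h>

static void checkKernel(const char *what) {
    cudaError_t status = cudaGetLastError();   // bad configuration, invalid arguments, ...
    if (status == cudaSuccess)
        status = cudaDeviceSynchronize();      // faults raised while the kernel runs
    if (status != cudaSuccess)
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(status));
}

It would be called as checkKernel("Triangle_Match1"); after each launch; the HIP side would use the matching hipGetLastError/hipDeviceSynchronize calls.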
a4a18d872e524b969712de3d32b9fdf73407eab0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { // We are not yet at the final dimension, call copy recursivley for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1, src_data, dest_data, is_forward); } } else { // We are at the last two dimensions, which are stored continuously in // memory. With (N,C,H,W) // (0,1,2,3) cur_dim -> H // cur_dim+1 -> W const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim+1); std::vector<int> ind_off(cur_dim+2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim+1] = offsets[cur_dim+1]; // Compute copy strides const int src_outer_stride = bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1); const int src_inner_stride = bottom[0]->shape(cur_dim+1); const int dest_outer_stride = top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1); const int dest_inner_stride = top[0]->shape(cur_dim+1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, src_outer_stride, src_inner_stride, dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* 
bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
a4a18d872e524b969712de3d32b9fdf73407eab0.cu
#include <vector> #include "caffe/layers/crop_layer.hpp" namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { // We are not yet at the final dimension, call copy recursivley for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1, src_data, dest_data, is_forward); } } else { // We are at the last two dimensions, which are stored continuously in // memory. With (N,C,H,W) // (0,1,2,3) cur_dim -> H // cur_dim+1 -> W const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim+1); std::vector<int> ind_off(cur_dim+2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim+1] = offsets[cur_dim+1]; // Compute copy strides const int src_outer_stride = bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1); const int src_inner_stride = bottom[0]->shape(cur_dim+1); const int dest_outer_stride = top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1); const int dest_inner_stride = top[0]->shape(cur_dim+1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, src_outer_stride, src_inner_stride, dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> 
indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
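The stride bookkeeping in crop_copy_gpu and copy_kernel is easiest to follow on a concrete shape. The sketch below is a self-contained host-side walk-through; the 4x6 bottom plane, 2x3 top plane, and offsets (1,2) are made-up numbers, not values taken from the layer:

// Hypothetical worked example of the crop strides above (no Caffe dependency).
// copy_kernel receives base pointers already advanced by offset(ind_off) and
// offset(indices); folding those in, these are the absolute elements each
// output line copies for a 4x6 -> 2x3 crop with spatial offsets (1,2).
#include <stdio.h>

int main(void) {
    const int bottom_w = 6, top_h = 2, top_w = 3;  // bottom height 4 is implicit here
    const int off_h = 1, off_w = 2;                // offsets[cur_dim], offsets[cur_dim+1]
    const int src_inner_stride  = bottom_w;        // as computed in crop_copy_gpu
    const int dest_inner_stride = top_w;

    for (int line = 0; line < top_h; ++line) {     // copy_kernel: one thread per output line
        int src_start  = (off_h + line) * src_inner_stride + off_w;
        int dest_start = line * dest_inner_stride;
        printf("line %d: bottom[%d..%d] -> top[%d..%d]\n", line,
               src_start, src_start + top_w - 1,
               dest_start, dest_start + top_w - 1);
    }
    return 0;  // prints bottom[8..10] -> top[0..2] and bottom[14..16] -> top[3..5]
}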
3ffe1761f32a51f10a599c22d4c64973be4545f5.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::hip::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // Compute the group's DCG and reduce it across all groups dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::hip::par(alloc), dcg.begin(), dcg.end()); } }; /*! 
\brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::hip::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::hip::par(alloc), sumap.begin(), sumap.end()); } }; /*! 
\brief Area Under Curve metric computation for ranking datasets */ struct EvalAucGpu : public Metric { public: // This function object computes the positive precision pair for each prediction group class ComputePosPair : public thrust::unary_function<uint32_t, double> { public: XGBOOST_DEVICE ComputePosPair(const double *pred_group_pos_precision, const double *pred_group_neg_precision, const double *pred_group_incr_precision) : pred_group_pos_precision_(pred_group_pos_precision), pred_group_neg_precision_(pred_group_neg_precision), pred_group_incr_precision_(pred_group_incr_precision) {} // Compute positive precision pair for the prediction group at 'idx' __device__ __forceinline__ double operator()(uint32_t idx) const { return pred_group_neg_precision_[idx] * (pred_group_incr_precision_[idx] + pred_group_pos_precision_[idx] * 0.5); } private: // Accumulated positive precision for the prediction group const double *pred_group_pos_precision_{nullptr}; // Accumulated negative precision for the prediction group const double *pred_group_neg_precision_{nullptr}; // Incremental positive precision for the prediction group const double *pred_group_incr_precision_{nullptr}; }; template <typename T> void ReleaseMemory(dh::caching_device_vector<T> &vec) { // NOLINT dh::caching_device_vector<T>().swap(vec); } bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_; auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); auto dweights = info.weights_.ConstDevicePointer(); // Sort all the predictions (from one or more groups) dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan(); const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan(); // Group info on device const auto &dgroups = segment_pred_sorter.GetGroupsSpan(); uint32_t ngroups = segment_pred_sorter.GetNumGroups(); // Final values double hsum_auc = 0.0; unsigned hauc_error = 0; int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; if (ngroups == 1) { const auto nitems = segment_pred_sorter.GetNumItems(); // First, segment all the predictions in the group. 
This is required so that we can // aggregate the positive and negative precisions within that prediction group dh::caching_device_vector<unsigned> dpred_segs(nitems, 0); auto *pred_seg_arr = dpred_segs.data().get(); // This is for getting the next segment number dh::caching_device_vector<unsigned> seg_idx(1, 0); auto *seg_idx_ptr = seg_idx.data().get(); dh::caching_device_vector<double> dbuf_pos(nitems, 0); dh::caching_device_vector<double> dbuf_neg(nitems, 0); auto *buf_pos_arr = dbuf_pos.data().get(); auto *buf_neg_arr = dbuf_neg.data().get(); dh::LaunchN(device_id, nitems, nullptr, [=] __device__(int idx) { auto ctr = dlabels[dpreds_orig_pos[idx]]; // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights == nullptr ? 1.0f : dweights[dpreds_orig_pos[idx]]; buf_pos_arr[idx] = ctr * wt; buf_neg_arr[idx] = (1.0f - ctr) * wt; if (idx == nitems - 1 || dsorted_preds[idx] != dsorted_preds[idx + 1]) { auto new_seg_idx = atomicAdd(seg_idx_ptr, 1); auto pred_val = dsorted_preds[idx]; do { pred_seg_arr[idx] = new_seg_idx; idx--; } while (idx >= 0 && dsorted_preds[idx] == pred_val); } }); auto nunique_preds = seg_idx.back(); ReleaseMemory(seg_idx); // Next, accumulate the positive and negative precisions for every prediction group dh::caching_device_vector<double> sum_dbuf_pos(nunique_preds, 0); auto itr = thrust::reduce_by_key(thrust::hip::par(alloc), dpred_segs.begin(), dpred_segs.end(), // Segmented by this dbuf_pos.begin(), // Individual precisions thrust::make_discard_iterator(), // Ignore unique segments sum_dbuf_pos.begin()); // Write accumulated results here ReleaseMemory(dbuf_pos); CHECK(itr.second - sum_dbuf_pos.begin() == nunique_preds); dh::caching_device_vector<double> sum_dbuf_neg(nunique_preds, 0); itr = thrust::reduce_by_key(thrust::hip::par(alloc), dpred_segs.begin(), dpred_segs.end(), dbuf_neg.begin(), thrust::make_discard_iterator(), sum_dbuf_neg.begin()); ReleaseMemory(dbuf_neg); ReleaseMemory(dpred_segs); CHECK(itr.second - sum_dbuf_neg.begin() == nunique_preds); dh::caching_device_vector<double> sum_nneg(nunique_preds, 0); thrust::inclusive_scan(thrust::hip::par(alloc), sum_dbuf_neg.begin(), sum_dbuf_neg.end(), sum_nneg.begin()); double sum_neg_prec_val = sum_nneg.back(); ReleaseMemory(sum_nneg); // Find incremental sum for the positive precisions that is then used to // compute incremental positive precision pair dh::caching_device_vector<double> sum_npos(nunique_preds + 1, 0); thrust::inclusive_scan(thrust::hip::par(alloc), sum_dbuf_pos.begin(), sum_dbuf_pos.end(), sum_npos.begin() + 1); double sum_pos_prec_val = sum_npos.back(); if (sum_pos_prec_val <= 0.0 || sum_neg_prec_val <= 0.0) { hauc_error = 1; } else { dh::caching_device_vector<double> sum_pospair(nunique_preds, 0); // Finally, compute the positive precision pair thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(static_cast<uint32_t>(nunique_preds)), sum_pospair.begin(), ComputePosPair(sum_dbuf_pos.data().get(), sum_dbuf_neg.data().get(), sum_npos.data().get())); ReleaseMemory(sum_dbuf_pos); ReleaseMemory(sum_dbuf_neg); ReleaseMemory(sum_npos); hsum_auc = thrust::reduce(thrust::hip::par(alloc), sum_pospair.begin(), sum_pospair.end()) / (sum_pos_prec_val * sum_neg_prec_val); } } else { // AUC sum for each group dh::caching_device_vector<double> sum_auc(ngroups, 0); // AUC error across all groups dh::caching_device_vector<int> auc_error(1, 0); auto *dsum_auc = sum_auc.data().get(); auto 
*dauc_error = auc_error.data().get(); // For each group item compute the aggregated precision dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { double sum_pospair = 0.0, sum_npos = 0.0, sum_nneg = 0.0, buf_pos = 0.0, buf_neg = 0.0; for (auto i = dgroups[gidx]; i < dgroups[gidx + 1]; ++i) { const auto ctr = dlabels[dpreds_orig_pos[i]]; // Keep bucketing predictions in same bucket if (i != dgroups[gidx] && dsorted_preds[i] != dsorted_preds[i - 1]) { sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5); sum_npos += buf_pos; sum_nneg += buf_neg; buf_neg = buf_pos = 0.0f; } // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights == nullptr ? 1.0f : dweights[gidx]; buf_pos += ctr * wt; buf_neg += (1.0f - ctr) * wt; } sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5); sum_npos += buf_pos; sum_nneg += buf_neg; // Check weird conditions if (sum_npos <= 0.0 || sum_nneg <= 0.0) { atomicAdd(dauc_error, 1); } else { // This is the AUC dsum_auc[gidx] = sum_pospair / (sum_npos * sum_nneg); } }); hsum_auc = thrust::reduce(thrust::hip::par(alloc), sum_auc.begin(), sum_auc.end()); hauc_error = auc_error.back(); // Copy it back to host } // Report average AUC across all groups // In distributed mode, workers which only contains pos or neg samples // will be ignored when aggregate AUC. bst_float dat[2] = {0.0f, 0.0f}; if (hauc_error < ngroups) { dat[0] = static_cast<bst_float>(hsum_auc); dat[1] = static_cast<bst_float>(ngroups - hauc_error); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } CHECK_GT(dat[1], 0.0f) << "AUC: the dataset only contains pos or neg samples"; return dat[0] / dat[1]; } const char* Name() const override { return "auc"; } }; /*! \brief Area Under PR Curve metric computation for ranking datasets */ struct EvalAucPRGpu : public Metric { public: // This function object computes the item's positive/negative precision value class ComputeItemPrecision : public thrust::unary_function<uint32_t, float> { public: // The precision type to be computed enum class PrecisionType { kPositive, kNegative }; XGBOOST_DEVICE ComputeItemPrecision(PrecisionType ptype, uint32_t ngroups, const float *dweights, const xgboost::common::Span<const uint32_t> &dgidxs, const float *dlabels) : ptype_(ptype), ngroups_(ngroups), dweights_(dweights), dgidxs_(dgidxs), dlabels_(dlabels) {} // Compute precision value for the prediction that was originally at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights_ == nullptr ? 1.0f : dweights_[ngroups_ == 1 ? idx : dgidxs_[idx]]; return wt * (ptype_ == PrecisionType::kPositive ? dlabels_[idx] : (1.0f - dlabels_[idx])); } private: PrecisionType ptype_; // Precision type to be computed uint32_t ngroups_; // Number of groups in the dataset const float *dweights_; // Instance/group weights const xgboost::common::Span<const uint32_t> dgidxs_; // The group a given instance belongs to const float *dlabels_; // Unsorted labels in the dataset }; bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? 
tgptr : info.group_ptr_; auto device = tparam_->gpu_id; dh::safe_cuda(hipSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); auto dweights = info.weights_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan(); // Group info on device const auto &dgroups = segment_pred_sorter.GetGroupsSpan(); uint32_t ngroups = segment_pred_sorter.GetNumGroups(); const auto &dgroup_idx = segment_pred_sorter.GetGroupSegmentsSpan(); // First, aggregate the positive and negative precision for each group dh::caching_device_vector<double> total_pos(ngroups, 0); dh::caching_device_vector<double> total_neg(ngroups, 0); // Allocator to be used for managing space overhead while performing transformed reductions dh::XGBCachingDeviceAllocator<char> alloc; // Compute each elements positive precision value and reduce them across groups concurrently. ComputeItemPrecision pos_prec_functor(ComputeItemPrecision::PrecisionType::kPositive, ngroups, dweights, dgroup_idx, dlabels); auto end_range = thrust::reduce_by_key(thrust::hip::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of positive precision values within a group dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()), pos_prec_functor), thrust::make_discard_iterator(), // We don't care for the group indices total_pos.begin()); // Sum of positive precision values in the group CHECK(end_range.second - total_pos.begin() == total_pos.size()); // Compute each elements negative precision value and reduce them across groups concurrently. 
ComputeItemPrecision neg_prec_functor(ComputeItemPrecision::PrecisionType::kNegative, ngroups, dweights, dgroup_idx, dlabels); end_range = thrust::reduce_by_key(thrust::hip::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of negative precision values within a group dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()), neg_prec_functor), thrust::make_discard_iterator(), // We don't care for the group indices total_neg.begin()); // Sum of negative precision values in the group CHECK(end_range.second - total_neg.begin() == total_neg.size()); const auto *dtotal_pos = total_pos.data().get(); const auto *dtotal_neg = total_neg.data().get(); // AUC sum for each group dh::caching_device_vector<double> sum_auc(ngroups, 0); // AUC error across all groups dh::caching_device_vector<int> auc_error(1, 0); auto *dsum_auc = sum_auc.data().get(); auto *dauc_error = auc_error.data().get(); int device_id = -1; dh::safe_cuda(hipGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { // We need pos > 0 && neg > 0 if (dtotal_pos[gidx] <= 0.0 || dtotal_neg[gidx] <= 0.0) { atomicAdd(dauc_error, 1); } else { auto gbegin = dgroups[gidx]; auto gend = dgroups[gidx + 1]; // Calculate AUC double tp = 0.0, prevtp = 0.0, fp = 0.0, prevfp = 0.0, h = 0.0, a = 0.0, b = 0.0; for (auto i = gbegin; i < gend; ++i) { const auto wt = dweights == nullptr ? 1.0f : dweights[ngroups == 1 ? dpreds_orig_pos[i] : gidx]; tp += wt * dlabels[dpreds_orig_pos[i]]; fp += wt * (1.0f - dlabels[dpreds_orig_pos[i]]); if ((i < gend - 1 && dsorted_preds[i] != dsorted_preds[i + 1]) || (i == gend - 1)) { if (tp == prevtp) { a = 1.0; b = 0.0; } else { h = (fp - prevfp) / (tp - prevtp); a = 1.0 + h; b = (prevfp - h * prevtp) / dtotal_pos[gidx]; } if (0.0 != b) { dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx] - b / a * (::log(a * tp / dtotal_pos[gidx] + b) - ::log(a * prevtp / dtotal_pos[gidx] + b))) / a; } else { dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx]) / a; } prevtp = tp; prevfp = fp; } } // Sanity check if (tp < 0 || prevtp < 0 || fp < 0 || prevfp < 0) { // Check if we have any metric error thus far auto current_auc_error = atomicAdd(dauc_error, 0); KERNEL_CHECK(!current_auc_error); } } }); const auto hsum_auc = thrust::reduce(thrust::hip::par(alloc), sum_auc.begin(), sum_auc.end()); const auto hauc_error = auc_error.back(); // Copy it back to host // Report average AUC-PR across all groups // In distributed mode, workers which only contains pos or neg samples // will be ignored when aggregate AUC-PR. 
bst_float dat[2] = {0.0f, 0.0f}; if (hauc_error < static_cast<int>(ngroups)) { dat[0] = static_cast<bst_float>(hsum_auc); dat[1] = static_cast<bst_float>(static_cast<int>(ngroups) - hauc_error); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } CHECK_GT(dat[1], 0.0f) << "AUC-PR: the dataset only contains pos or neg samples"; CHECK_LE(dat[0], dat[1]) << "AUC-PR: AUC > 1.0"; return dat[0] / dat[1]; } const char* Name() const override { return "aucpr"; } }; XGBOOST_REGISTER_GPU_METRIC(AucGpu, "auc") .describe("Area under curve for rank computed on GPU.") .set_body([](const char* param) { return new EvalAucGpu(); }); XGBOOST_REGISTER_GPU_METRIC(AucPRGpu, "aucpr") .describe("Area under PR curve for rank computed on GPU.") .set_body([](const char* param) { return new EvalAucPRGpu(); }); XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
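For reference, ComputeDCG above accumulates, per group, the sum of (2^label - 1) / log2(rank + 2) over the first topn ranked items, and EvalMetric then divides that DCG by the IDCG obtained from the ideally ordered labels. The short host-side sketch below restates that computation; the labels and the topn value are made up purely for illustration.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <functional>
#include <vector>

// DCG@topn with the same per-item contribution used in ComputeDCG above.
static double DCG(const std::vector<int>& labels, std::size_t topn) {
  double dcg = 0.0;
  for (std::size_t ridx = 0; ridx < labels.size() && ridx < topn; ++ridx) {
    dcg += ((1 << labels[ridx]) - 1) / std::log2(ridx + 2.0);
  }
  return dcg;
}

int main() {
  // Made-up relevance labels, listed in the order induced by sorting the
  // predictions of one group in descending order.
  std::vector<int> labels_by_pred = {0, 2, 1, 0, 3};
  std::size_t topn = 4;

  // IDCG: the DCG of the same labels in their ideal (descending) order.
  std::vector<int> ideal = labels_by_pred;
  std::sort(ideal.begin(), ideal.end(), std::greater<int>());

  double dcg = DCG(labels_by_pred, topn);
  double idcg = DCG(ideal, topn);
  printf("DCG@%zu = %f, IDCG@%zu = %f, NDCG = %f\n",
         topn, dcg, topn, idcg, idcg > 0.0 ? dcg / idcg : 0.0);
  return 0;
}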
3ffe1761f32a51f10a599c22d4c64973be4545f5.cu
/*! * Copyright 2020 by Contributors * \file rank_metric.cc * \brief prediction rank based metrics. * \author Kailong Chen, Tianqi Chen */ #include <rabit/rabit.h> #include <dmlc/registry.h> #include <xgboost/metric.h> #include <xgboost/host_device_vector.h> #include <thrust/iterator/discard_iterator.h> #include <cmath> #include <vector> #include "metric_common.h" #include "../common/math.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace metric { // tag the this file, used by force static link later. DMLC_REGISTRY_FILE_TAG(rank_metric_gpu); /*! \brief Evaluate rank list on GPU */ template <typename EvalMetricT> struct EvalRankGpu : public Metric, public EvalRankConfig { public: bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(preds.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.size() == 0 ? tgptr : info.group_ptr_; const auto ngroups = static_cast<bst_omp_uint>(gptr.size() - 1); auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); // Compute individual group metric and sum them up return EvalMetricT::EvalMetric(segment_pred_sorter, dlabels, *this); } const char* Name() const override { return name.c_str(); } explicit EvalRankGpu(const char* name, const char* param) { using namespace std; // NOLINT(*) if (param != nullptr) { std::ostringstream os; if (sscanf(param, "%u[-]?", &this->topn) == 1) { os << name << '@' << param; this->name = os.str(); } else { os << name << param; this->name = os.str(); } if (param[strlen(param) - 1] == '-') { this->minus = true; } } else { this->name = name; } } }; /*! \brief Precision at N, for both classification and rank */ struct EvalPrecisionGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 
1 : 0; }; // NOLINT // Find each group's metric sum dh::caching_device_vector<uint32_t> hits(ngroups, 0); const auto nitems = pred_sorter.GetNumItems(); auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn && DetermineNonTrivialLabelLambda(idx)) { atomicAdd(&dhits[group_idx], 1); } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return static_cast<double>(thrust::reduce(thrust::cuda::par(alloc), hits.begin(), hits.end())) / ecfg.topn; } }; /*! \brief NDCG: Normalized Discounted Cumulative Gain at N */ struct EvalNDCGGpu { public: static void ComputeDCG(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg, // The order in which labels have to be accessed. The order is determined // by sorting the predictions or the labels for the entire dataset const xgboost::common::Span<const uint32_t> &dlabels_sort_order, dh::caching_device_vector<double> *dcgptr) { dh::caching_device_vector<double> &dcgs(*dcgptr); // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // First, determine non zero labels in the dataset individually auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dlabels_sort_order[idx]])); }; // NOLINT // Find each group's DCG value const auto nitems = pred_sorter.GetNumItems(); auto *ddcgs = dcgs.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; auto label = DetermineNonTrivialLabelLambda(idx); if (ridx < ecfg.topn && label) { atomicAdd(&ddcgs[group_idx], ((1 << label) - 1) / std::log2(ridx + 2.0)); } }); } static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Sort the labels and compute IDCG dh::SegmentSorter<float> segment_label_sorter; segment_label_sorter.SortItems(dlabels, pred_sorter.GetNumItems(), pred_sorter.GetGroupSegmentsSpan()); uint32_t ngroups = pred_sorter.GetNumGroups(); dh::caching_device_vector<double> idcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, segment_label_sorter.GetOriginalPositionsSpan(), &idcg); // Compute the DCG values next dh::caching_device_vector<double> dcg(ngroups, 0); ComputeDCG(pred_sorter, dlabels, ecfg, pred_sorter.GetOriginalPositionsSpan(), &dcg); double *ddcg = dcg.data().get(); double *didcg = idcg.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // Compute the group's DCG and reduce it across all groups dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { if (didcg[gidx] == 0.0f) { ddcg[gidx] = (ecfg.minus) ? 0.0f : 1.0f; } else { ddcg[gidx] /= didcg[gidx]; } }); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; return thrust::reduce(thrust::cuda::par(alloc), dcg.begin(), dcg.end()); } }; /*! 
\brief Mean Average Precision at N, for both classification and rank */ struct EvalMAPGpu { public: static double EvalMetric(const dh::SegmentSorter<float> &pred_sorter, const float *dlabels, const EvalRankConfig &ecfg) { // Group info on device const auto &dgroups = pred_sorter.GetGroupsSpan(); const auto ngroups = pred_sorter.GetNumGroups(); const auto &dgroup_idx = pred_sorter.GetGroupSegmentsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = pred_sorter.GetOriginalPositionsSpan(); // First, determine non zero labels in the dataset individually const auto nitems = pred_sorter.GetNumItems(); dh::caching_device_vector<uint32_t> hits(nitems, 0); auto DetermineNonTrivialLabelLambda = [=] __device__(uint32_t idx) { return (static_cast<unsigned>(dlabels[dpreds_orig_pos[idx]]) != 0) ? 1 : 0; }; // NOLINT thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(nitems), hits.begin(), DetermineNonTrivialLabelLambda); // Allocator to be used by sort for managing space overhead while performing prefix scans dh::XGBCachingDeviceAllocator<char> alloc; // Next, prefix scan the nontrivial labels that are segmented to accumulate them. // This is required for computing the metric sum // Data segmented into different groups... thrust::inclusive_scan_by_key(thrust::cuda::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), hits.begin(), // Input value hits.begin()); // In-place scan // Find each group's metric sum dh::caching_device_vector<double> sumap(ngroups, 0); auto *dsumap = sumap.data().get(); const auto *dhits = hits.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN(device_id, nitems, nullptr, [=] __device__(uint32_t idx) { if (DetermineNonTrivialLabelLambda(idx)) { const auto group_idx = dgroup_idx[idx]; const auto group_begin = dgroups[group_idx]; const auto ridx = idx - group_begin; if (ridx < ecfg.topn) { atomicAdd(&dsumap[group_idx], static_cast<double>(dhits[idx]) / (ridx + 1)); } } }); // Aggregate the group's item precisions dh::LaunchN(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { auto nhits = dgroups[gidx + 1] ? dhits[dgroups[gidx + 1] - 1] : 0; if (nhits != 0) { dsumap[gidx] /= nhits; } else { if (ecfg.minus) { dsumap[gidx] = 0; } else { dsumap[gidx] = 1; } } }); return thrust::reduce(thrust::cuda::par(alloc), sumap.begin(), sumap.end()); } }; /*! 
\brief Area Under Curve metric computation for ranking datasets */ struct EvalAucGpu : public Metric { public: // This function object computes the positive precision pair for each prediction group class ComputePosPair : public thrust::unary_function<uint32_t, double> { public: XGBOOST_DEVICE ComputePosPair(const double *pred_group_pos_precision, const double *pred_group_neg_precision, const double *pred_group_incr_precision) : pred_group_pos_precision_(pred_group_pos_precision), pred_group_neg_precision_(pred_group_neg_precision), pred_group_incr_precision_(pred_group_incr_precision) {} // Compute positive precision pair for the prediction group at 'idx' __device__ __forceinline__ double operator()(uint32_t idx) const { return pred_group_neg_precision_[idx] * (pred_group_incr_precision_[idx] + pred_group_pos_precision_[idx] * 0.5); } private: // Accumulated positive precision for the prediction group const double *pred_group_pos_precision_{nullptr}; // Accumulated negative precision for the prediction group const double *pred_group_neg_precision_{nullptr}; // Incremental positive precision for the prediction group const double *pred_group_incr_precision_{nullptr}; }; template <typename T> void ReleaseMemory(dh::caching_device_vector<T> &vec) { // NOLINT dh::caching_device_vector<T>().swap(vec); } bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? tgptr : info.group_ptr_; auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); auto dweights = info.weights_.ConstDevicePointer(); // Sort all the predictions (from one or more groups) dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan(); const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan(); // Group info on device const auto &dgroups = segment_pred_sorter.GetGroupsSpan(); uint32_t ngroups = segment_pred_sorter.GetNumGroups(); // Final values double hsum_auc = 0.0; unsigned hauc_error = 0; int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // Allocator to be used for managing space overhead while performing reductions dh::XGBCachingDeviceAllocator<char> alloc; if (ngroups == 1) { const auto nitems = segment_pred_sorter.GetNumItems(); // First, segment all the predictions in the group. 
This is required so that we can // aggregate the positive and negative precisions within that prediction group dh::caching_device_vector<unsigned> dpred_segs(nitems, 0); auto *pred_seg_arr = dpred_segs.data().get(); // This is for getting the next segment number dh::caching_device_vector<unsigned> seg_idx(1, 0); auto *seg_idx_ptr = seg_idx.data().get(); dh::caching_device_vector<double> dbuf_pos(nitems, 0); dh::caching_device_vector<double> dbuf_neg(nitems, 0); auto *buf_pos_arr = dbuf_pos.data().get(); auto *buf_neg_arr = dbuf_neg.data().get(); dh::LaunchN(device_id, nitems, nullptr, [=] __device__(int idx) { auto ctr = dlabels[dpreds_orig_pos[idx]]; // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights == nullptr ? 1.0f : dweights[dpreds_orig_pos[idx]]; buf_pos_arr[idx] = ctr * wt; buf_neg_arr[idx] = (1.0f - ctr) * wt; if (idx == nitems - 1 || dsorted_preds[idx] != dsorted_preds[idx + 1]) { auto new_seg_idx = atomicAdd(seg_idx_ptr, 1); auto pred_val = dsorted_preds[idx]; do { pred_seg_arr[idx] = new_seg_idx; idx--; } while (idx >= 0 && dsorted_preds[idx] == pred_val); } }); auto nunique_preds = seg_idx.back(); ReleaseMemory(seg_idx); // Next, accumulate the positive and negative precisions for every prediction group dh::caching_device_vector<double> sum_dbuf_pos(nunique_preds, 0); auto itr = thrust::reduce_by_key(thrust::cuda::par(alloc), dpred_segs.begin(), dpred_segs.end(), // Segmented by this dbuf_pos.begin(), // Individual precisions thrust::make_discard_iterator(), // Ignore unique segments sum_dbuf_pos.begin()); // Write accumulated results here ReleaseMemory(dbuf_pos); CHECK(itr.second - sum_dbuf_pos.begin() == nunique_preds); dh::caching_device_vector<double> sum_dbuf_neg(nunique_preds, 0); itr = thrust::reduce_by_key(thrust::cuda::par(alloc), dpred_segs.begin(), dpred_segs.end(), dbuf_neg.begin(), thrust::make_discard_iterator(), sum_dbuf_neg.begin()); ReleaseMemory(dbuf_neg); ReleaseMemory(dpred_segs); CHECK(itr.second - sum_dbuf_neg.begin() == nunique_preds); dh::caching_device_vector<double> sum_nneg(nunique_preds, 0); thrust::inclusive_scan(thrust::cuda::par(alloc), sum_dbuf_neg.begin(), sum_dbuf_neg.end(), sum_nneg.begin()); double sum_neg_prec_val = sum_nneg.back(); ReleaseMemory(sum_nneg); // Find incremental sum for the positive precisions that is then used to // compute incremental positive precision pair dh::caching_device_vector<double> sum_npos(nunique_preds + 1, 0); thrust::inclusive_scan(thrust::cuda::par(alloc), sum_dbuf_pos.begin(), sum_dbuf_pos.end(), sum_npos.begin() + 1); double sum_pos_prec_val = sum_npos.back(); if (sum_pos_prec_val <= 0.0 || sum_neg_prec_val <= 0.0) { hauc_error = 1; } else { dh::caching_device_vector<double> sum_pospair(nunique_preds, 0); // Finally, compute the positive precision pair thrust::transform(thrust::make_counting_iterator(static_cast<uint32_t>(0)), thrust::make_counting_iterator(static_cast<uint32_t>(nunique_preds)), sum_pospair.begin(), ComputePosPair(sum_dbuf_pos.data().get(), sum_dbuf_neg.data().get(), sum_npos.data().get())); ReleaseMemory(sum_dbuf_pos); ReleaseMemory(sum_dbuf_neg); ReleaseMemory(sum_npos); hsum_auc = thrust::reduce(thrust::cuda::par(alloc), sum_pospair.begin(), sum_pospair.end()) / (sum_pos_prec_val * sum_neg_prec_val); } } else { // AUC sum for each group dh::caching_device_vector<double> sum_auc(ngroups, 0); // AUC error across all groups dh::caching_device_vector<int> auc_error(1, 0); auto *dsum_auc = sum_auc.data().get(); auto 
*dauc_error = auc_error.data().get(); // For each group item compute the aggregated precision dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { double sum_pospair = 0.0, sum_npos = 0.0, sum_nneg = 0.0, buf_pos = 0.0, buf_neg = 0.0; for (auto i = dgroups[gidx]; i < dgroups[gidx + 1]; ++i) { const auto ctr = dlabels[dpreds_orig_pos[i]]; // Keep bucketing predictions in same bucket if (i != dgroups[gidx] && dsorted_preds[i] != dsorted_preds[i - 1]) { sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5); sum_npos += buf_pos; sum_nneg += buf_neg; buf_neg = buf_pos = 0.0f; } // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights == nullptr ? 1.0f : dweights[gidx]; buf_pos += ctr * wt; buf_neg += (1.0f - ctr) * wt; } sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5); sum_npos += buf_pos; sum_nneg += buf_neg; // Check weird conditions if (sum_npos <= 0.0 || sum_nneg <= 0.0) { atomicAdd(dauc_error, 1); } else { // This is the AUC dsum_auc[gidx] = sum_pospair / (sum_npos * sum_nneg); } }); hsum_auc = thrust::reduce(thrust::cuda::par(alloc), sum_auc.begin(), sum_auc.end()); hauc_error = auc_error.back(); // Copy it back to host } // Report average AUC across all groups // In distributed mode, workers which only contains pos or neg samples // will be ignored when aggregate AUC. bst_float dat[2] = {0.0f, 0.0f}; if (hauc_error < ngroups) { dat[0] = static_cast<bst_float>(hsum_auc); dat[1] = static_cast<bst_float>(ngroups - hauc_error); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } CHECK_GT(dat[1], 0.0f) << "AUC: the dataset only contains pos or neg samples"; return dat[0] / dat[1]; } const char* Name() const override { return "auc"; } }; /*! \brief Area Under PR Curve metric computation for ranking datasets */ struct EvalAucPRGpu : public Metric { public: // This function object computes the item's positive/negative precision value class ComputeItemPrecision : public thrust::unary_function<uint32_t, float> { public: // The precision type to be computed enum class PrecisionType { kPositive, kNegative }; XGBOOST_DEVICE ComputeItemPrecision(PrecisionType ptype, uint32_t ngroups, const float *dweights, const xgboost::common::Span<const uint32_t> &dgidxs, const float *dlabels) : ptype_(ptype), ngroups_(ngroups), dweights_(dweights), dgidxs_(dgidxs), dlabels_(dlabels) {} // Compute precision value for the prediction that was originally at 'idx' __device__ __forceinline__ float operator()(uint32_t idx) const { // For ranking task, weights are per-group // For binary classification task, weights are per-instance const auto wt = dweights_ == nullptr ? 1.0f : dweights_[ngroups_ == 1 ? idx : dgidxs_[idx]]; return wt * (ptype_ == PrecisionType::kPositive ? dlabels_[idx] : (1.0f - dlabels_[idx])); } private: PrecisionType ptype_; // Precision type to be computed uint32_t ngroups_; // Number of groups in the dataset const float *dweights_; // Instance/group weights const xgboost::common::Span<const uint32_t> dgidxs_; // The group a given instance belongs to const float *dlabels_; // Unsorted labels in the dataset }; bst_float Eval(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, bool distributed) override { // Sanity check is done by the caller std::vector<unsigned> tgptr(2, 0); tgptr[1] = static_cast<unsigned>(info.labels_.Size()); const std::vector<unsigned> &gptr = info.group_ptr_.empty() ? 
tgptr : info.group_ptr_; auto device = tparam_->gpu_id; dh::safe_cuda(cudaSetDevice(device)); info.labels_.SetDevice(device); preds.SetDevice(device); info.weights_.SetDevice(device); auto dpreds = preds.ConstDevicePointer(); auto dlabels = info.labels_.ConstDevicePointer(); auto dweights = info.weights_.ConstDevicePointer(); // Sort all the predictions dh::SegmentSorter<float> segment_pred_sorter; segment_pred_sorter.SortItems(dpreds, preds.Size(), gptr); const auto &dsorted_preds = segment_pred_sorter.GetItemsSpan(); // Original positions of the predictions after they have been sorted const auto &dpreds_orig_pos = segment_pred_sorter.GetOriginalPositionsSpan(); // Group info on device const auto &dgroups = segment_pred_sorter.GetGroupsSpan(); uint32_t ngroups = segment_pred_sorter.GetNumGroups(); const auto &dgroup_idx = segment_pred_sorter.GetGroupSegmentsSpan(); // First, aggregate the positive and negative precision for each group dh::caching_device_vector<double> total_pos(ngroups, 0); dh::caching_device_vector<double> total_neg(ngroups, 0); // Allocator to be used for managing space overhead while performing transformed reductions dh::XGBCachingDeviceAllocator<char> alloc; // Compute each elements positive precision value and reduce them across groups concurrently. ComputeItemPrecision pos_prec_functor(ComputeItemPrecision::PrecisionType::kPositive, ngroups, dweights, dgroup_idx, dlabels); auto end_range = thrust::reduce_by_key(thrust::cuda::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of positive precision values within a group dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()), pos_prec_functor), thrust::make_discard_iterator(), // We don't care for the group indices total_pos.begin()); // Sum of positive precision values in the group CHECK(end_range.second - total_pos.begin() == total_pos.size()); // Compute each elements negative precision value and reduce them across groups concurrently. 
ComputeItemPrecision neg_prec_functor(ComputeItemPrecision::PrecisionType::kNegative, ngroups, dweights, dgroup_idx, dlabels); end_range = thrust::reduce_by_key(thrust::cuda::par(alloc), dh::tcbegin(dgroup_idx), dh::tcend(dgroup_idx), thrust::make_transform_iterator( // The indices need not be sequential within a group, as we care only // about the sum of negative precision values within a group dh::tcbegin(segment_pred_sorter.GetOriginalPositionsSpan()), neg_prec_functor), thrust::make_discard_iterator(), // We don't care for the group indices total_neg.begin()); // Sum of negative precision values in the group CHECK(end_range.second - total_neg.begin() == total_neg.size()); const auto *dtotal_pos = total_pos.data().get(); const auto *dtotal_neg = total_neg.data().get(); // AUC sum for each group dh::caching_device_vector<double> sum_auc(ngroups, 0); // AUC error across all groups dh::caching_device_vector<int> auc_error(1, 0); auto *dsum_auc = sum_auc.data().get(); auto *dauc_error = auc_error.data().get(); int device_id = -1; dh::safe_cuda(cudaGetDevice(&device_id)); // For each group item compute the aggregated precision dh::LaunchN<1, 32>(device_id, ngroups, nullptr, [=] __device__(uint32_t gidx) { // We need pos > 0 && neg > 0 if (dtotal_pos[gidx] <= 0.0 || dtotal_neg[gidx] <= 0.0) { atomicAdd(dauc_error, 1); } else { auto gbegin = dgroups[gidx]; auto gend = dgroups[gidx + 1]; // Calculate AUC double tp = 0.0, prevtp = 0.0, fp = 0.0, prevfp = 0.0, h = 0.0, a = 0.0, b = 0.0; for (auto i = gbegin; i < gend; ++i) { const auto wt = dweights == nullptr ? 1.0f : dweights[ngroups == 1 ? dpreds_orig_pos[i] : gidx]; tp += wt * dlabels[dpreds_orig_pos[i]]; fp += wt * (1.0f - dlabels[dpreds_orig_pos[i]]); if ((i < gend - 1 && dsorted_preds[i] != dsorted_preds[i + 1]) || (i == gend - 1)) { if (tp == prevtp) { a = 1.0; b = 0.0; } else { h = (fp - prevfp) / (tp - prevtp); a = 1.0 + h; b = (prevfp - h * prevtp) / dtotal_pos[gidx]; } if (0.0 != b) { dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx] - b / a * (std::log(a * tp / dtotal_pos[gidx] + b) - std::log(a * prevtp / dtotal_pos[gidx] + b))) / a; } else { dsum_auc[gidx] += (tp / dtotal_pos[gidx] - prevtp / dtotal_pos[gidx]) / a; } prevtp = tp; prevfp = fp; } } // Sanity check if (tp < 0 || prevtp < 0 || fp < 0 || prevfp < 0) { // Check if we have any metric error thus far auto current_auc_error = atomicAdd(dauc_error, 0); KERNEL_CHECK(!current_auc_error); } } }); const auto hsum_auc = thrust::reduce(thrust::cuda::par(alloc), sum_auc.begin(), sum_auc.end()); const auto hauc_error = auc_error.back(); // Copy it back to host // Report average AUC-PR across all groups // In distributed mode, workers which only contains pos or neg samples // will be ignored when aggregate AUC-PR. 
bst_float dat[2] = {0.0f, 0.0f}; if (hauc_error < static_cast<int>(ngroups)) { dat[0] = static_cast<bst_float>(hsum_auc); dat[1] = static_cast<bst_float>(static_cast<int>(ngroups) - hauc_error); } if (distributed) { rabit::Allreduce<rabit::op::Sum>(dat, 2); } CHECK_GT(dat[1], 0.0f) << "AUC-PR: the dataset only contains pos or neg samples"; CHECK_LE(dat[0], dat[1]) << "AUC-PR: AUC > 1.0"; return dat[0] / dat[1]; } const char* Name() const override { return "aucpr"; } }; XGBOOST_REGISTER_GPU_METRIC(AucGpu, "auc") .describe("Area under curve for rank computed on GPU.") .set_body([](const char* param) { return new EvalAucGpu(); }); XGBOOST_REGISTER_GPU_METRIC(AucPRGpu, "aucpr") .describe("Area under PR curve for rank computed on GPU.") .set_body([](const char* param) { return new EvalAucPRGpu(); }); XGBOOST_REGISTER_GPU_METRIC(PrecisionGpu, "pre") .describe("precision@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalPrecisionGpu>("pre", param); }); XGBOOST_REGISTER_GPU_METRIC(NDCGGpu, "ndcg") .describe("ndcg@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalNDCGGpu>("ndcg", param); }); XGBOOST_REGISTER_GPU_METRIC(MAPGpu, "map") .describe("map@k for rank computed on GPU.") .set_body([](const char* param) { return new EvalRankGpu<EvalMAPGpu>("map", param); }); } // namespace metric } // namespace xgboost
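The grouped branch of the AUC kernel above counts correctly ordered positive/negative pairs while pooling tied predictions in a bucket, then divides by npos * nneg. The host-side sketch below replays that bucketed pair counting on a tiny made-up example with unit weights, so the flush expression buf_neg * (sum_npos + 0.5 * buf_pos) can be checked by hand (it yields AUC = 3.5 / 6 here).

#include <cstdio>
#include <vector>

int main() {
  // Made-up group, already sorted by prediction in descending order;
  // label 1 marks a positive instance, 0 a negative one (unit weights).
  std::vector<float> preds  = {0.9f, 0.8f, 0.8f, 0.4f, 0.1f};
  std::vector<int>   labels = {1,    1,    0,    0,    1};

  double sum_pospair = 0.0, sum_npos = 0.0, sum_nneg = 0.0;
  double buf_pos = 0.0, buf_neg = 0.0;
  for (std::size_t i = 0; i < preds.size(); ++i) {
    if (i != 0 && preds[i] != preds[i - 1]) {
      // Flush the bucket of tied predictions, crediting half for ties.
      sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
      sum_npos += buf_pos;
      sum_nneg += buf_neg;
      buf_pos = buf_neg = 0.0;
    }
    buf_pos += labels[i];
    buf_neg += 1 - labels[i];
  }
  sum_pospair += buf_neg * (sum_npos + buf_pos * 0.5);
  sum_npos += buf_pos;
  sum_nneg += buf_neg;

  // 3.5 correctly ordered pairs out of 3 * 2 = 6 -> AUC ~= 0.5833
  printf("AUC = %f\n", sum_pospair / (sum_npos * sum_nneg));
  return 0;
}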
eb32900d2936b1c55dcd589c72047ad9a7561d03.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { if (a[5] != b[5]) { return 0.0; } float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void ml_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 6 tensor at::Tensor ml_nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( ml_nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> 
mask_host(boxes_num * col_blocks); THCudaCheck(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
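A hypothetical caller for ml_nms_cuda above might look as follows: boxes are packed as [x1, y1, x2, y2, score, label] rows, and the returned tensor holds the original indices of the boxes that survive suppression at the given IoU threshold. The box values, the threshold, and the assumption that this translation unit is linked against the file above are all illustrative.

#include <ATen/ATen.h>
#include <iostream>

// Declared in the file above (assumed to be linked into this program).
at::Tensor ml_nms_cuda(const at::Tensor boxes, float nms_overlap_thresh);

int main() {
  // Made-up boxes: two heavily overlapping boxes of class 1 and one
  // separate box of class 2, packed as [x1, y1, x2, y2, score, label].
  float data[3][6] = {
      { 0.f,  0.f, 10.f, 10.f, 0.9f, 1.f},
      { 1.f,  1.f, 11.f, 11.f, 0.8f, 1.f},
      {50.f, 50.f, 60.f, 60.f, 0.7f, 2.f},
  };
  at::Tensor boxes = at::empty({3, 6}, at::kFloat);
  auto acc = boxes.accessor<float, 2>();
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 6; ++j) {
      acc[i][j] = data[i][j];
    }
  }

  // Indices (into the original row order) of the boxes that survive NMS.
  at::Tensor keep = ml_nms_cuda(boxes.cuda(), /*nms_overlap_thresh=*/0.5f);
  std::cout << keep << std::endl;  // box 1 is expected to be suppressed
  return 0;
}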
eb32900d2936b1c55dcd589c72047ad9a7561d03.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { if (a[5] != b[5]) { return 0.0; } float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void ml_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } // boxes is a N x 6 tensor at::Tensor ml_nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) { using scalar_t = float; AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor"); auto scores = boxes.select(1, 4); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto boxes_sorted = boxes.index_select(0, order_t); int boxes_num = boxes.size(0); const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock); scalar_t* boxes_dev = boxes_sorted.data<scalar_t>(); THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState unsigned long long* mask_dev = NULL; //THCudaCheck(THCudaMalloc(state, (void**) &mask_dev, // boxes_num * col_blocks * sizeof(unsigned long long))); mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long)); dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock), THCCeilDiv(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); ml_nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); THCudaCheck(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num 
* col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU)); int64_t* keep_out = keep.data<int64_t>(); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } THCudaFree(state, mask_dev); // TODO improve this part return std::get<0>(order_t.index({ keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to( order_t.device(), keep.scalar_type()) }).sort(0, false)); }
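devIoU above uses the inclusive "+1" pixel convention and returns 0 whenever the class labels in the sixth field differ. The small host-side restatement below runs one made-up pair of boxes through the same arithmetic so the resulting overlap (about 0.70) can be verified by hand.

#include <algorithm>
#include <cstdio>

// Same arithmetic as devIoU above, on the host.
static float iou(const float* a, const float* b) {
  if (a[5] != b[5]) { return 0.0f; }  // different classes never suppress
  float left = std::max(a[0], b[0]), right = std::min(a[2], b[2]);
  float top = std::max(a[1], b[1]), bottom = std::min(a[3], b[3]);
  float width = std::max(right - left + 1, 0.f);
  float height = std::max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

int main() {
  // Made-up boxes: 11x11-pixel boxes (inclusive bounds) shifted by one pixel.
  float a[6] = {0.f, 0.f, 10.f, 10.f, 0.9f, 1.f};
  float b[6] = {1.f, 1.f, 11.f, 11.f, 0.8f, 1.f};
  printf("IoU = %f\n", iou(a, b));  // 100 / (121 + 121 - 100) ~= 0.704
  return 0;
}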
0dd90ee0d5070a628bad3653a17707268976ee97.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileType: SOURCE
 *
 * This program is free software: you can redistribute it and/or modify it
 * under the terms of the license found in the LICENSE.txt file in the root
 * directory of this source tree.
 */

// =======
// Headers
// =======

#include "./query_device.h"

// ============
// query device
// ============

/// \brief Queries GPU device information, such as the number of devices,
///        number of multiprocessors, and the number of threads per each
///        multiprocessor.
///
/// \param[out] device_properties
///             A struct to be filled with the number of devices, threads and
///             multiprocessors.

void query_device(DeviceProperties& device_properties)
{
    // Query number of devices
    int num_devices;
    hipError_t error = hipGetDeviceCount(&num_devices);
    if (error != hipSuccess)
    {
        return;
    }

    // Set number of devices
    device_properties.set_num_devices(num_devices);

    // Read properties of each device
    struct hipDeviceProp_t properties;
    for (int device = 0; device < num_devices; ++device)
    {
        hipGetDeviceProperties(&properties, device);

        // Machines with no GPUs may still report one emulation device
        if (properties.major == 9999)
        {
            // This is a gpu emulation not an actual device
            device_properties.num_multiprocessors[device] = 0;
            device_properties.num_threads_per_multiprocessor[device] = 0;
        }
        else
        {
            device_properties.num_multiprocessors[device] = \
                properties.multiProcessorCount;
            device_properties.num_threads_per_multiprocessor[device] = \
                properties.maxThreadsPerMultiProcessor;
        }
    }
}
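Stripped of the DeviceProperties wrapper (whose definition lives in query_device.h and is not shown here), the underlying HIP query amounts to the short standalone sketch below; it only illustrates the runtime calls the function above relies on and is not part of this file.

#include <cstdio>
#include <hip/hip_runtime.h>

int main() {
  int num_devices = 0;
  if (hipGetDeviceCount(&num_devices) != hipSuccess) {
    return 1;
  }
  for (int device = 0; device < num_devices; ++device) {
    hipDeviceProp_t properties;
    if (hipGetDeviceProperties(&properties, device) != hipSuccess) {
      continue;
    }
    // The same two fields query_device stores per device.
    printf("device %d: %d multiprocessors, %d threads per multiprocessor\n",
           device, properties.multiProcessorCount,
           properties.maxThreadsPerMultiProcessor);
  }
  return 0;
}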
0dd90ee0d5070a628bad3653a17707268976ee97.cu
/*
 * SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <[email protected]>
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileType: SOURCE
 *
 * This program is free software: you can redistribute it and/or modify it
 * under the terms of the license found in the LICENSE.txt file in the root
 * directory of this source tree.
 */

// =======
// Headers
// =======

#include "./query_device.h"

// ============
// query device
// ============

/// \brief Queries GPU device information, such as the number of devices,
///        number of multiprocessors, and the number of threads per each
///        multiprocessor.
///
/// \param[out] device_properties
///             A struct to be filled with the number of devices, threads and
///             multiprocessors.

void query_device(DeviceProperties& device_properties)
{
    // Query number of devices
    int num_devices;
    cudaError_t error = cudaGetDeviceCount(&num_devices);
    if (error != cudaSuccess)
    {
        return;
    }

    // Set number of devices
    device_properties.set_num_devices(num_devices);

    // Read properties of each device
    struct cudaDeviceProp properties;
    for (int device = 0; device < num_devices; ++device)
    {
        cudaGetDeviceProperties(&properties, device);

        // Machines with no GPUs may still report one emulation device
        if (properties.major == 9999)
        {
            // This is a gpu emulation not an actual device
            device_properties.num_multiprocessors[device] = 0;
            device_properties.num_threads_per_multiprocessor[device] = 0;
        }
        else
        {
            device_properties.num_multiprocessors[device] = \
                properties.multiProcessorCount;
            device_properties.num_threads_per_multiprocessor[device] = \
                properties.maxThreadsPerMultiProcessor;
        }
    }
}
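// --- Context for query_device above ---
// Both translation units depend on a DeviceProperties type declared in
// "./query_device.h", which is not included in this dump.  The stand-in below
// is only a guess at a minimal compatible shape (the real header may differ),
// plus a small driver showing how query_device() is typically consumed.
#include <cstdio>
#include <vector>

struct DeviceProperties {
    int num_devices = 0;
    std::vector<int> num_multiprocessors;
    std::vector<int> num_threads_per_multiprocessor;

    void set_num_devices(int n) {
        // query_device() calls this before indexing the per-device arrays,
        // so size them here.
        num_devices = n;
        num_multiprocessors.assign(n, 0);
        num_threads_per_multiprocessor.assign(n, 0);
    }
};

void query_device(DeviceProperties& device_properties);  // defined above

int main() {
    DeviceProperties props;
    query_device(props);
    for (int d = 0; d < props.num_devices; ++d) {
        std::printf("device %d: %d multiprocessors, %d threads per multiprocessor\n",
                    d, props.num_multiprocessors[d],
                    props.num_threads_per_multiprocessor[d]);
    }
    return 0;
}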
b1cea09d580a411ac7ad6e33668b09a8fe6c3631.hip
// !!! This is a file automatically generated by hipify!!! /** * CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs * * @author Ashwin Joisa * @author Praveen Gupta **/ //=============================================================================================// // Include header files #include <iostream> #include <hip/hip_runtime.h> // Include custom header file for implementation of Graphs #include "Graph.h" //=============================================================================================// #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) // Max device memory : 4 GB #define MAX_MEMORY ((long long)4e9) //=============================================================================================// using namespace std; //=============================================================================================// #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; void printTime(float ms) { int h = ms / (1000*3600); int m = (((int)ms) / (1000*60)) % 60; int s = (((int)ms) / 1000) % 60; int intMS = ms; intMS %= 1000; printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS); printf("Time Taken in milliseconds : %d\n", (int)ms); } // Catch Cuda errors inline void gpuAssert(hipError_t error, const char *file, int line, bool abort = false) { if (error != hipSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, hipGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } //=============================================================================================// __global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount, int *sigma, int *distance, float *dependency, int *Q, int *Qpointers) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int Q_len; __shared__ int Qpointers_len; __shared__ int noOfBlocks; if(idx == 0) { s = blockIdx.x - gridDim.x; noOfBlocks = gridDim.x; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount - noOfBlocks) { if(idx == 0) { s += noOfBlocks; // printf("\rProgress... 
%5.2f%%", (s+1)*100.0/nodeCount); // printf("Node %d\n", s); Q[0 + (blockIdx.x * nodeCount)] = s; Q_len = 1; Qpointers[0 + (blockIdx.x * nodeCount)] = 0; Qpointers[1 + (blockIdx.x * nodeCount)] = 1; Qpointers_len = 1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v + (blockIdx.x * nodeCount)] = 0; sigma[v + (blockIdx.x * nodeCount)] = 1; } else { distance[v + (blockIdx.x * nodeCount)] = INT_MAX; sigma[v + (blockIdx.x * nodeCount)] = 0; } dependency[v + (blockIdx.x * nodeCount)] = 0.0; } __syncthreads(); // BFS while(true) { __syncthreads(); for(int k=idx; k<Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(atomicCAS(&distance[w + (blockIdx.x * nodeCount)], INT_MAX, distance[v + (blockIdx.x * nodeCount)] +1) == INT_MAX) { int t = atomicAdd(&Q_len, 1); Q[t + (blockIdx.x * nodeCount)] = w; } if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)]+1)) { atomicAdd(&sigma[w + (blockIdx.x * nodeCount)], sigma[v + (blockIdx.x * nodeCount)]); } } } __syncthreads(); if(Q_len == Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]) break; if(idx == 0) { Qpointers_len++; Qpointers[Qpointers_len + (blockIdx.x * nodeCount)] = Q_len; } __syncthreads(); } __syncthreads(); // Reverse BFS while(Qpointers_len > 0) { for(int k=idx; k < Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)] + 1)) { if (sigma[w + (blockIdx.x * nodeCount)] != 0) dependency[v + (blockIdx.x * nodeCount)] += (sigma[v + (blockIdx.x * nodeCount)] * 1.0 / sigma[w + (blockIdx.x * nodeCount)]) * (1 + dependency[w + (blockIdx.x * nodeCount)]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
atomicAdd(bwCentrality + v, dependency[v + (blockIdx.x * nodeCount)] / 2); } } __syncthreads(); if(idx == 0) Qpointers_len--; __syncthreads(); } } } float *betweennessCentrality(Graph *graph, int nodeCount) { float *bwCentrality = new float[nodeCount](); float *device_bwCentrality, *dependency; int *sigma, *distance, *Q, *Qpointers; const int BLOCK_COUNT = MAX_MEMORY / (4 * 5 * nodeCount); // pritnf(">> %d\n", BLOCK_COUNT); //TODO: Allocate device memory for bwCentrality catchCudaError(hipMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount)); catchCudaError(hipMalloc((void **)&sigma, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&distance, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&Q, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&Qpointers, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(hipMalloc((void **)&dependency, sizeof(float) * nodeCount * BLOCK_COUNT)); catchCudaError(hipMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, hipMemcpyHostToDevice)); // Timer hipEvent_t device_start, device_end; catchCudaError(hipEventCreate(&device_start)); catchCudaError(hipEventCreate(&device_end)); catchCudaError(hipEventRecord(device_start)); hipLaunchKernelGGL(( betweennessCentralityKernel), dim3(BLOCK_COUNT), dim3(MAX_THREAD_COUNT), 0, 0, graph, device_bwCentrality, nodeCount, sigma, distance, dependency, Q, Qpointers); hipDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(hipEventRecord(device_end)); catchCudaError(hipEventSynchronize(device_end)); hipEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(hipMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, hipMemcpyDeviceToHost)); catchCudaError(hipFree(device_bwCentrality)); catchCudaError(hipFree(sigma)); catchCudaError(hipFree(dependency)); catchCudaError(hipFree(distance)); catchCudaError(hipFree(Q)); catchCudaError(hipFree(Qpointers)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(hipMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(hipMemcpy(device_graph, host_graph, sizeof(Graph), hipMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(hipMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), hipMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(hipMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(hipMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), hipMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(hipMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), hipMemcpyHostToDevice)); float *bwCentrality = betweennessCentrality(device_graph, nodeCount); float maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); if (choice == 'y' || choice == 'Y') printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]); } cout << endl; printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness); printTime(device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(hipFree(adjacencyList)); catchCudaError(hipFree(adjacencyListPointers)); catchCudaError(hipFree(device_graph)); }
b1cea09d580a411ac7ad6e33668b09a8fe6c3631.cu
/** * CUDA C/C++ implementation for Accelerating Graph Betweenness Centrality for Sparse Graphs * * @author Ashwin Joisa * @author Praveen Gupta **/ //=============================================================================================// // Include header files #include <iostream> #include <cuda.h> // Include custom header file for implementation of Graphs #include "Graph.h" //=============================================================================================// #define MAX_THREAD_COUNT 1024 #define CEIL(a, b) ((a - 1) / b + 1) // Max device memory : 4 GB #define MAX_MEMORY ((long long)4e9) //=============================================================================================// using namespace std; //=============================================================================================// #define catchCudaError(error) { gpuAssert((error), __FILE__, __LINE__); } float device_time_taken; void printTime(float ms) { int h = ms / (1000*3600); int m = (((int)ms) / (1000*60)) % 60; int s = (((int)ms) / 1000) % 60; int intMS = ms; intMS %= 1000; printf("Time Taken (Parallel) = %dh %dm %ds %dms\n", h, m, s, intMS); printf("Time Taken in milliseconds : %d\n", (int)ms); } // Catch Cuda errors inline void gpuAssert(cudaError_t error, const char *file, int line, bool abort = false) { if (error != cudaSuccess) { printf("\n====== Cuda Error Code %i ======\n %s in CUDA %s\n", error, cudaGetErrorString(error)); printf("\nIn file :%s\nOn line: %d", file, line); if(abort) exit(-1); } } //=============================================================================================// __global__ void betweennessCentralityKernel(Graph *graph, float *bwCentrality, int nodeCount, int *sigma, int *distance, float *dependency, int *Q, int *Qpointers) { int idx = threadIdx.x; if(idx >= nodeCount) return; __shared__ int s; __shared__ int Q_len; __shared__ int Qpointers_len; __shared__ int noOfBlocks; if(idx == 0) { s = blockIdx.x - gridDim.x; noOfBlocks = gridDim.x; // printf("Progress... %3d%%", 0); } __syncthreads(); while(s < nodeCount - noOfBlocks) { if(idx == 0) { s += noOfBlocks; // printf("\rProgress... 
%5.2f%%", (s+1)*100.0/nodeCount); // printf("Node %d\n", s); Q[0 + (blockIdx.x * nodeCount)] = s; Q_len = 1; Qpointers[0 + (blockIdx.x * nodeCount)] = 0; Qpointers[1 + (blockIdx.x * nodeCount)] = 1; Qpointers_len = 1; } __syncthreads(); for(int v=idx; v<nodeCount; v+=blockDim.x) { if(v == s) { distance[v + (blockIdx.x * nodeCount)] = 0; sigma[v + (blockIdx.x * nodeCount)] = 1; } else { distance[v + (blockIdx.x * nodeCount)] = INT_MAX; sigma[v + (blockIdx.x * nodeCount)] = 0; } dependency[v + (blockIdx.x * nodeCount)] = 0.0; } __syncthreads(); // BFS while(true) { __syncthreads(); for(int k=idx; k<Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(atomicCAS(&distance[w + (blockIdx.x * nodeCount)], INT_MAX, distance[v + (blockIdx.x * nodeCount)] +1) == INT_MAX) { int t = atomicAdd(&Q_len, 1); Q[t + (blockIdx.x * nodeCount)] = w; } if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)]+1)) { atomicAdd(&sigma[w + (blockIdx.x * nodeCount)], sigma[v + (blockIdx.x * nodeCount)]); } } } __syncthreads(); if(Q_len == Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]) break; if(idx == 0) { Qpointers_len++; Qpointers[Qpointers_len + (blockIdx.x * nodeCount)] = Q_len; } __syncthreads(); } __syncthreads(); // Reverse BFS while(Qpointers_len > 0) { for(int k=idx; k < Qpointers[Qpointers_len + (blockIdx.x * nodeCount)]; k+=blockDim.x) { if(k < Qpointers[Qpointers_len -1 + (blockIdx.x * nodeCount)]) continue; int v = Q[k + (blockIdx.x * nodeCount)]; for(int r = graph->adjacencyListPointers[v]; r < graph->adjacencyListPointers[v + 1]; r++) { int w = graph->adjacencyList[r]; if(distance[w + (blockIdx.x * nodeCount)] == (distance[v + (blockIdx.x * nodeCount)] + 1)) { if (sigma[w + (blockIdx.x * nodeCount)] != 0) dependency[v + (blockIdx.x * nodeCount)] += (sigma[v + (blockIdx.x * nodeCount)] * 1.0 / sigma[w + (blockIdx.x * nodeCount)]) * (1 + dependency[w + (blockIdx.x * nodeCount)]); } } if (v != s) { // Each shortest path is counted twice. So, each partial shortest path dependency is halved. 
atomicAdd(bwCentrality + v, dependency[v + (blockIdx.x * nodeCount)] / 2); } } __syncthreads(); if(idx == 0) Qpointers_len--; __syncthreads(); } } } float *betweennessCentrality(Graph *graph, int nodeCount) { float *bwCentrality = new float[nodeCount](); float *device_bwCentrality, *dependency; int *sigma, *distance, *Q, *Qpointers; const int BLOCK_COUNT = MAX_MEMORY / (4 * 5 * nodeCount); // pritnf(">> %d\n", BLOCK_COUNT); //TODO: Allocate device memory for bwCentrality catchCudaError(cudaMalloc((void **)&device_bwCentrality, sizeof(float) * nodeCount)); catchCudaError(cudaMalloc((void **)&sigma, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&distance, sizeof(int) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&Q, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&Qpointers, sizeof(int) * (nodeCount) * BLOCK_COUNT)); catchCudaError(cudaMalloc((void **)&dependency, sizeof(float) * nodeCount * BLOCK_COUNT)); catchCudaError(cudaMemcpy(device_bwCentrality, bwCentrality, sizeof(float) * nodeCount, cudaMemcpyHostToDevice)); // Timer cudaEvent_t device_start, device_end; catchCudaError(cudaEventCreate(&device_start)); catchCudaError(cudaEventCreate(&device_end)); catchCudaError(cudaEventRecord(device_start)); betweennessCentralityKernel<<<BLOCK_COUNT, MAX_THREAD_COUNT>>>(graph, device_bwCentrality, nodeCount, sigma, distance, dependency, Q, Qpointers); cudaDeviceSynchronize(); //End of progress bar cout << endl; // Timer catchCudaError(cudaEventRecord(device_end)); catchCudaError(cudaEventSynchronize(device_end)); cudaEventElapsedTime(&device_time_taken, device_start, device_end); // Copy back and free memory catchCudaError(cudaMemcpy(bwCentrality, device_bwCentrality, sizeof(float) * nodeCount, cudaMemcpyDeviceToHost)); catchCudaError(cudaFree(device_bwCentrality)); catchCudaError(cudaFree(sigma)); catchCudaError(cudaFree(dependency)); catchCudaError(cudaFree(distance)); catchCudaError(cudaFree(Q)); catchCudaError(cudaFree(Qpointers)); return bwCentrality; } int main(int argc, char *argv[]) { if (argc < 2) { cout << "Usage: " << argv[0] << " <graph_input_file> [output_file]\n"; return 0; } char choice; cout << "Would you like to print the Graph Betweenness Centrality for all nodes? 
(y/n) "; cin >> choice; freopen(argv[1], "r", stdin); Graph *host_graph = new Graph(); Graph *device_graph; catchCudaError(cudaMalloc((void **)&device_graph, sizeof(Graph))); host_graph->readGraph(); int nodeCount = host_graph->getNodeCount(); int edgeCount = host_graph->getEdgeCount(); catchCudaError(cudaMemcpy(device_graph, host_graph, sizeof(Graph), cudaMemcpyHostToDevice)); // Copy Adjancency List to device int *adjacencyList; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyList, sizeof(int) * (2 * edgeCount + 1))); catchCudaError(cudaMemcpy(adjacencyList, host_graph->adjacencyList, sizeof(int) * (2 * edgeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyList), &adjacencyList, sizeof(int *), cudaMemcpyHostToDevice)); // Copy Adjancency List Pointers to device int *adjacencyListPointers; // Alocate device memory and copy catchCudaError(cudaMalloc((void **)&adjacencyListPointers, sizeof(int) * (nodeCount + 1))); catchCudaError(cudaMemcpy(adjacencyListPointers, host_graph->adjacencyListPointers, sizeof(int) * (nodeCount + 1), cudaMemcpyHostToDevice)); // Update the pointer to this, in device_graph catchCudaError(cudaMemcpy(&(device_graph->adjacencyListPointers), &adjacencyListPointers, sizeof(int *), cudaMemcpyHostToDevice)); float *bwCentrality = betweennessCentrality(device_graph, nodeCount); float maxBetweenness = -1; for (int i = 0; i < nodeCount; i++) { maxBetweenness = max(maxBetweenness, bwCentrality[i]); if (choice == 'y' || choice == 'Y') printf("Node %d => Betweeness Centrality %0.2lf\n", i, bwCentrality[i]); } cout << endl; printf("\nMaximum Betweenness Centrality ==> %0.2lf\n", maxBetweenness); printTime(device_time_taken); if (argc == 3) { freopen(argv[2], "w", stdout); for (int i = 0; i < nodeCount; i++) cout << bwCentrality[i] << " "; cout << endl; } // Free all memory delete[] bwCentrality; catchCudaError(cudaFree(adjacencyList)); catchCudaError(cudaFree(adjacencyListPointers)); catchCudaError(cudaFree(device_graph)); }
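// --- CPU reference for the betweenness-centrality kernel above ---
// The kernel implements Brandes' algorithm: a forward BFS from each source s
// counts shortest paths (sigma), then a reverse sweep accumulates dependencies,
// and each contribution is halved because every undirected shortest path is
// discovered from both of its endpoints.  The sequential sketch below (not part
// of the original sources) assumes the same CSR layout as the Graph class
// (adjacencyListPointers / adjacencyList) and is meant only for validating the
// GPU output on small graphs.
#include <queue>
#include <stack>
#include <vector>

std::vector<float> brandesReference(const std::vector<int>& ptr,
                                    const std::vector<int>& adj,
                                    int nodeCount) {
    std::vector<float> bc(nodeCount, 0.0f);
    for (int s = 0; s < nodeCount; ++s) {
        std::vector<int> sigma(nodeCount, 0), dist(nodeCount, -1);
        std::vector<float> delta(nodeCount, 0.0f);
        std::stack<int> order;   // vertices in order of non-decreasing distance
        std::queue<int> q;
        sigma[s] = 1;
        dist[s] = 0;
        q.push(s);
        // Forward BFS: count shortest paths to every reachable vertex.
        while (!q.empty()) {
            int v = q.front(); q.pop();
            order.push(v);
            for (int r = ptr[v]; r < ptr[v + 1]; ++r) {
                int w = adj[r];
                if (dist[w] < 0) { dist[w] = dist[v] + 1; q.push(w); }
                if (dist[w] == dist[v] + 1) sigma[w] += sigma[v];
            }
        }
        // Reverse sweep: accumulate dependencies from the farthest vertices.
        while (!order.empty()) {
            int w = order.top(); order.pop();
            for (int r = ptr[w]; r < ptr[w + 1]; ++r) {
                int v = adj[r];
                if (dist[v] == dist[w] - 1 && sigma[w] != 0)
                    delta[v] += (sigma[v] * 1.0f / sigma[w]) * (1.0f + delta[w]);
            }
            if (w != s) bc[w] += delta[w] / 2.0f;  // halve: undirected graph
        }
    }
    return bc;
}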
da09ca5583c1d8843e48d6fa9c9d7f360fe9cdb5.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void mul(float *d_A, float *d_B, float *d_C, int n);
void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n);

int main()
{
    int n;
    int i, j;
    float **h_Mat1, **h_Mat2, **h_Mat3;

    printf("Enter the dimension of square matrix, n for n X n: ");
    scanf("%d", &n);

    h_Mat1 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat1[i] = (float *) malloc(n * sizeof(float));
    }
    h_Mat2 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat2[i] = (float *) malloc(n * sizeof(float));
    }
    h_Mat3 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat3[i] = (float *) malloc(n * sizeof(float));
    }

    srand(time(0));
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_Mat1[i][j] = rand() % 1000;
            h_Mat2[i][j] = rand() % 1000;
        }
    }

    matMul(h_Mat1, h_Mat2, h_Mat3, n);

    return 0;
}

// Each thread computes one element of C at row i, column j.
__global__ void mul(float *d_A, float *d_B, float *d_C, int n)
{
    int i, j, k;

    i = blockIdx.y * blockDim.y + threadIdx.y;
    j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n || j >= n) {
        return;
    }

    d_C[i * n + j] = 0;
    for (k = 0; k < n; ++k) {
        d_C[i * n + j] += d_A[i * n + k] * d_B[k * n + j];
    }

    return;
}

void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n)
{
    int size = n * n * sizeof(float);
    int i, j, k;
    float *h_A, *h_B, *h_C;
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    hipError_t err = hipSuccess;

    // Flatten the row-pointer matrices into contiguous host buffers.
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);

    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_A[i * n + j] = h_Mat1[i][j];
            h_B[i * n + j] = h_Mat2[i][j];
        }
    }

    err = hipMalloc((void **) &d_A, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMalloc((void **) &d_B, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMalloc((void **) &d_C, size);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Launching CUDA mul kernel with (%d, %d, %d) blocks and (%d, %d, %d) threads per block.\n",
           (n + 15) / 16, (n + 15) / 16, 1, 16, 16, 1);
    dim3 grid((n + 15) / 16, (n + 15) / 16, 1);
    dim3 block(16, 16, 1);
    // Grid dimensions come first, then the 16x16 thread block.
    hipLaunchKernelGGL(( mul), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, n);
    err = hipGetLastError();
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to launch mul kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);

    // Verify the GPU result against a CPU reference product.
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_Mat3[i][j] = 0;
            for (k = 0; k < n; ++k) {
                h_Mat3[i][j] += h_Mat1[i][k] * h_Mat2[k][j];
            }
            if (fabs(h_C[i * n + j] - h_Mat3[i][j]) > 1e-5) {
                fprintf(stderr, "Result verification failed at element (%d, %d)!\n", i, j);
                exit(EXIT_FAILURE);
            }
            h_Mat3[i][j] = h_C[i * n + j];
        }
    }
    printf("TEST PASSED\n");

    return;
}
da09ca5583c1d8843e48d6fa9c9d7f360fe9cdb5.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

__global__ void mul(float *d_A, float *d_B, float *d_C, int n);
void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n);

int main()
{
    int n;
    int i, j;
    float **h_Mat1, **h_Mat2, **h_Mat3;

    printf("Enter the dimension of square matrix, n for n X n: ");
    scanf("%d", &n);

    h_Mat1 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat1[i] = (float *) malloc(n * sizeof(float));
    }
    h_Mat2 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat2[i] = (float *) malloc(n * sizeof(float));
    }
    h_Mat3 = (float **) malloc(n * sizeof(float *));
    for (i = 0; i < n; ++i) {
        h_Mat3[i] = (float *) malloc(n * sizeof(float));
    }

    srand(time(0));
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_Mat1[i][j] = rand() % 1000;
            h_Mat2[i][j] = rand() % 1000;
        }
    }

    matMul(h_Mat1, h_Mat2, h_Mat3, n);

    return 0;
}

// Each thread computes one element of C at row i, column j.
__global__ void mul(float *d_A, float *d_B, float *d_C, int n)
{
    int i, j, k;

    i = blockIdx.y * blockDim.y + threadIdx.y;
    j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n || j >= n) {
        return;
    }

    d_C[i * n + j] = 0;
    for (k = 0; k < n; ++k) {
        d_C[i * n + j] += d_A[i * n + k] * d_B[k * n + j];
    }

    return;
}

void matMul(float **h_Mat1, float **h_Mat2, float **h_Mat3, int n)
{
    int size = n * n * sizeof(float);
    int i, j, k;
    float *h_A, *h_B, *h_C;
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    cudaError_t err = cudaSuccess;

    // Flatten the row-pointer matrices into contiguous host buffers.
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);
    h_C = (float *) malloc(size);

    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_A[i * n + j] = h_Mat1[i][j];
            h_B[i * n + j] = h_Mat2[i][j];
        }
    }

    err = cudaMalloc((void **) &d_A, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMalloc((void **) &d_B, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMalloc((void **) &d_C, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Launching CUDA mul kernel with (%d, %d, %d) blocks and (%d, %d, %d) threads per block.\n",
           (n + 15) / 16, (n + 15) / 16, 1, 16, 16, 1);
    dim3 grid((n + 15) / 16, (n + 15) / 16, 1);
    dim3 block(16, 16, 1);
    // Grid dimensions come first, then the 16x16 thread block.
    mul<<<grid, block>>>(d_A, d_B, d_C, n);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch mul kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // Verify the GPU result against a CPU reference product.
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            h_Mat3[i][j] = 0;
            for (k = 0; k < n; ++k) {
                h_Mat3[i][j] += h_Mat1[i][k] * h_Mat2[k][j];
            }
            if (fabs(h_C[i * n + j] - h_Mat3[i][j]) > 1e-5) {
                fprintf(stderr, "Result verification failed at element (%d, %d)!\n", i, j);
                exit(EXIT_FAILURE);
            }
            h_Mat3[i][j] = h_C[i * n + j];
        }
    }
    printf("TEST PASSED\n");

    return;
}
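// --- Launch geometry used by matMul above ---
// The mul kernel assigns one thread per element of C: thread (x, y) in block
// (bx, by) computes row by*16 + y and column bx*16 + x, so the grid must span
// ceil(n / 16) tiles per dimension while the 16x16 tile stays within the
// 1024-threads-per-block limit.  In the <<<grid, block>>> and
// hipLaunchKernelGGL forms the grid dimensions come first.  The helper below
// is hypothetical, only to make that arithmetic explicit.
#include <cuda_runtime.h>

static dim3 tilesFor(int n, int tile = 16) {
    return dim3((n + tile - 1) / tile, (n + tile - 1) / tile, 1);
}

// Usage with the mul() kernel defined above:
//   dim3 block(16, 16, 1);
//   dim3 grid = tilesFor(n);
//   mul<<<grid, block>>>(d_A, d_B, d_C, n);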
a5d37fd256f81e31876a5373a9efe7166619dedb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "svgf/svgf.h" #include "kernel/StreamCompaction.h" #include "kernel/context.cuh" #include "kernel/light.cuh" #include "kernel/material.cuh" #include "kernel/intersect.cuh" #include "kernel/accelerator.cuh" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" #define ENABLE_PERSISTENT_THREAD __global__ void genPath( idaten::TileDomain tileDomain, bool isFillAOV, idaten::SVGFPathTracing::Path* paths, aten::ray* rays, int width, int height, int sample, int maxSamples, unsigned int frame, const aten::CameraParameter* __restrict__ camera, const void* samplerValues, const unsigned int* __restrict__ random) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= width || iy >= height) { return; } const auto idx = getIdx(ix, iy, width); paths->attrib[idx].isHit = false; if (paths->attrib[idx].isKill) { paths->attrib[idx].isTerminate = true; return; } #if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL auto scramble = random[idx] * 0x1fe3434f; paths->sampler[idx].init(frame, 0, scramble, samplerValues); #elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ auto rnd = random[idx]; auto scramble = rnd * 0x1fe3434f * ((frame + 133 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM)); paths->sampler[idx].init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 0, scramble); #endif float r1 = paths->sampler[idx].nextSample(); float r2 = paths->sampler[idx].nextSample(); if (isFillAOV) { r1 = r2 = 0.5f; } ix += tileDomain.x; iy += tileDomain.y; float s = (ix + r1) / (float)(camera->width); float t = (iy + r2) / (float)(camera->height); AT_NAME::CameraSampleResult camsample; AT_NAME::PinholeCamera::sample(&camsample, camera, s, t); rays[idx] = camsample.r; paths->throughput[idx].throughput = aten::vec3(1); paths->throughput[idx].pdfb = 0.0f; paths->attrib[idx].isTerminate = false; paths->attrib[idx].isSingular = false; paths->contrib[idx].samples += 1; // Accumulate value, so do not reset. //path.contrib = aten::vec3(0); } // NOTE // persistent thread. // https://gist.github.com/guozhou/b972bb42bbc5cba1f062#file-persistent-cpp-L15 // NOTE // compute capability 6.0 // http://homepages.math.uic.edu/~jan/mcs572/performance_considerations.pdf // p3 #define NUM_SM 64 // no. of streaming multiprocessors #define NUM_WARP_PER_SM 64 // maximum no. of resident warps per SM #define NUM_BLOCK_PER_SM 32 // maximum no. 
of resident blocks per SM #define NUM_BLOCK (NUM_SM * NUM_BLOCK_PER_SM) #define NUM_WARP_PER_BLOCK (NUM_WARP_PER_SM / NUM_BLOCK_PER_SM) #define WARP_SIZE 32 __device__ unsigned int g_headDev = 0; __global__ void hitTest( idaten::TileDomain tileDomain, idaten::SVGFPathTracing::Path* paths, aten::Intersection* isects, aten::ray* rays, int* hitbools, int width, int height, const aten::GeomParameter* __restrict__ shapes, int geomnum, const aten::LightParameter* __restrict__ lights, int lightnum, hipTextureObject_t* nodes, const aten::PrimitiveParamter* __restrict__ prims, hipTextureObject_t vtxPos, aten::mat4* matrices, int bounce, float hitDistLimit) { #ifdef ENABLE_PERSISTENT_THREAD // warp-wise head index of tasks in a block __shared__ volatile unsigned int headBlock[NUM_WARP_PER_BLOCK]; volatile unsigned int& headWarp = headBlock[threadIdx.y]; if (blockIdx.x == 0 && threadIdx.x == 0) { g_headDev = 0; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } do { // let lane 0 fetch [wh, wh + WARP_SIZE - 1] for a warp if (threadIdx.x == 0) { headWarp = atomicAdd(&g_headDev, WARP_SIZE); } // task index per thread in a warp unsigned int idx = headWarp + threadIdx.x; if (idx >= tileDomain.w * tileDomain.h) { return; } paths->attrib[idx].isHit = false; hitbools[idx] = 0; if (paths->attrib[idx].isTerminate) { continue; } aten::Intersection isect; float t_max = AT_MATH_INF; if (bounce >= 1 && !paths->attrib[idx].isSingular) { t_max = hitDistLimit; } // TODO // Voxel. // . //bool enableLod = (bounce >= 2); bool enableLod = false; int depth = 9; bool isHit = intersectClosest(&ctxt, rays[idx], &isect, t_max, enableLod, depth); #if 0 isects[idx].t = isect.t; isects[idx].objid = isect.objid; isects[idx].mtrlid = isect.mtrlid; isects[idx].meshid = isect.meshid; isects[idx].primid = isect.primid; isects[idx].a = isect.a; isects[idx].b = isect.b; #else isects[idx] = isect; #endif if (bounce >= 1 && !paths->attrib[idx].isSingular && isect.t > hitDistLimit) { isHit = false; paths->attrib[idx].isTerminate = true; } paths->attrib[idx].isHit = isHit; hitbools[idx] = isHit ? 1 : 0; } while (true); #else const auto ix = blockIdx.x * blockDim.x + threadIdx.x; const auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); paths->attrib[idx].isHit = false; hitbools[idx] = 0; if (paths->attrib[idx].isTerminate) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } aten::Intersection isect; float t_max = AT_MATH_INF; if (bounce >= 1 && !paths->attrib[idx].isSingular) { t_max = hitDistLimit; } bool isHit = intersectClosest(&ctxt, rays[idx], &isect, t_max); #if 0 isects[idx].t = isect.t; isects[idx].objid = isect.objid; isects[idx].mtrlid = isect.mtrlid; isects[idx].meshid = isect.meshid; isects[idx].area = isect.area; isects[idx].primid = isect.primid; isects[idx].a = isect.a; isects[idx].b = isect.b; #else isects[idx] = isect; #endif if (bounce >= 1 && !paths->attrib[idx].isSingular && isect.t > hitDistLimit) { isHit = false; } paths->attrib[idx].isHit = isHit; hitbools[idx] = isHit ? 
1 : 0; #endif } __global__ void shadeMiss( idaten::TileDomain tileDomain, int bounce, float4* aovNormalDepth, float4* aovTexclrMeshid, idaten::SVGFPathTracing::Path* paths, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); if (!paths->attrib[idx].isTerminate && !paths->attrib[idx].isHit) { // TODO auto bg = aten::vec3(0); if (bounce == 0) { paths->attrib[idx].isKill = true; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // Export bg color to albedo buffer. aovTexclrMeshid[_idx] = make_float4(bg.x, bg.y, bg.z, -1); aovNormalDepth[_idx].w = -1; // For exporting separated albedo. bg = aten::vec3(1, 1, 1); } auto contrib = paths->throughput[idx].throughput * bg; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); paths->attrib[idx].isTerminate = true; } } __global__ void shadeMissWithEnvmap( idaten::TileDomain tileDomain, int offsetX, int offsetY, int bounce, const aten::CameraParameter* __restrict__ camera, float4* aovNormalDepth, float4* aovTexclrMeshid, hipTextureObject_t* textures, int envmapIdx, real envmapAvgIllum, real envmapMultiplyer, idaten::SVGFPathTracing::Path* paths, const aten::ray* __restrict__ rays, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); if (!paths->attrib[idx].isTerminate && !paths->attrib[idx].isHit) { aten::vec3 dir = rays[idx].dir; if (bounce == 0) { // Suppress jittering envrinment map. // So, re-sample ray without random. // TODO // More efficient way... float s = (ix + offsetX) / (float)(width); float t = (iy + offsetY) / (float)(height); AT_NAME::CameraSampleResult camsample; AT_NAME::PinholeCamera::sample(&camsample, camera, s, t); dir = camsample.r.dir; } auto uv = AT_NAME::envmap::convertDirectionToUV(dir); auto bg = tex2D<float4>(textures[envmapIdx], uv.x, uv.y); auto emit = aten::vec3(bg.x, bg.y, bg.z); float misW = 1.0f; if (bounce == 0 || (bounce == 1 && paths->attrib[idx].isSingular)) { paths->attrib[idx].isKill = true; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // Export envmap to albedo buffer. aovTexclrMeshid[_idx] = make_float4(emit.x, emit.y, emit.z, -1); aovNormalDepth[_idx].w = -1; // For exporting separated albedo. 
emit = aten::vec3(1, 1, 1); } else { auto pdfLight = AT_NAME::ImageBasedLight::samplePdf(emit, envmapAvgIllum); misW = paths->throughput[idx].pdfb / (pdfLight + paths->throughput[idx].pdfb); emit *= envmapMultiplyer; } auto contrib = paths->throughput[idx].throughput * misW * emit; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); paths->attrib[idx].isTerminate = true; } } __global__ void shade( idaten::TileDomain tileDomain, float4* aovNormalDepth, float4* aovTexclrMeshid, aten::mat4 mtxW2C, int width, int height, idaten::SVGFPathTracing::Path* paths, const int* __restrict__ hitindices, int* hitnum, const aten::Intersection* __restrict__ isects, aten::ray* rays, int frame, int bounce, int rrBounce, const aten::GeomParameter* __restrict__ shapes, int geomnum, const aten::MaterialParameter* __restrict__ mtrls, const aten::LightParameter* __restrict__ lights, int lightnum, const aten::PrimitiveParamter* __restrict__ prims, hipTextureObject_t vtxPos, hipTextureObject_t vtxNml, const aten::mat4* __restrict__ matrices, hipTextureObject_t* textures, unsigned int* random, idaten::SVGFPathTracing::ShadowRay* shadowRays) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= *hitnum) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.mtrls = mtrls; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.vtxNml = vtxNml; ctxt.matrices = matrices; ctxt.textures = textures; } idx = hitindices[idx]; __shared__ idaten::SVGFPathTracing::ShadowRay shShadowRays[64 * idaten::SVGFPathTracing::ShadowRayNum]; __shared__ aten::MaterialParameter shMtrls[64]; const auto ray = rays[idx]; #if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL auto scramble = random[idx] * 0x1fe3434f; paths->sampler[idx].init(frame, 4 + bounce * 300, scramble); #elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ auto rnd = random[idx]; auto scramble = rnd * 0x1fe3434f * ((frame + 331 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM)); paths->sampler[idx].init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 4 + bounce * 300, scramble); #endif aten::hitrecord rec; const auto& isect = isects[idx]; auto obj = &ctxt.shapes[isect.objid]; evalHitResult(&ctxt, obj, ray, &rec, &isect); bool isBackfacing = dot(rec.normal, -ray.dir) < 0.0f; // . // . aten::vec3 orienting_normal = rec.normal; if (rec.mtrlid >= 0) { shMtrls[threadIdx.x] = ctxt.mtrls[rec.mtrlid]; #if 1 if (rec.isVoxel) { // Replace to lambert. const auto& albedo = ctxt.mtrls[rec.mtrlid].baseColor; shMtrls[threadIdx.x] = aten::MaterialParameter(aten::MaterialType::Lambert, MaterialAttributeLambert); shMtrls[threadIdx.x].baseColor = albedo; } #endif if (shMtrls[threadIdx.x].type != aten::MaterialType::Layer) { shMtrls[threadIdx.x].albedoMap = (int)(shMtrls[threadIdx.x].albedoMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].albedoMap] : -1); shMtrls[threadIdx.x].normalMap = (int)(shMtrls[threadIdx.x].normalMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].normalMap] : -1); shMtrls[threadIdx.x].roughnessMap = (int)(shMtrls[threadIdx.x].roughnessMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].roughnessMap] : -1); } } else { // TODO shMtrls[threadIdx.x] = aten::MaterialParameter(aten::MaterialType::Lambert, MaterialAttributeLambert); shMtrls[threadIdx.x].baseColor = aten::vec3(1.0f); } // Render AOVs. // NOTE // AOV. // temporal reprojectionatrous. // . 
if (bounce == 0) { int ix = idx % tileDomain.w; int iy = idx / tileDomain.w; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // World coordinate to Clip coordinate. aten::vec4 pos = aten::vec4(rec.p, 1); pos = mtxW2C.apply(pos); // normal, depth aovNormalDepth[_idx] = make_float4(orienting_normal.x, orienting_normal.y, orienting_normal.z, pos.w); // texture color, meshid. auto texcolor = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1.0f)); #if 0 aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.meshid); #else aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.mtrlid); #endif // For exporting separated albedo. shMtrls[threadIdx.x].albedoMap = -1; } // TODO // How to deal Refraction? else if (bounce == 1 && paths->attrib[idx].mtrlType == aten::MaterialType::Specular) { int ix = idx % tileDomain.w; int iy = idx / tileDomain.w; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // World coordinate to Clip coordinate. aten::vec4 pos = aten::vec4(rec.p, 1); pos = mtxW2C.apply(pos); // normal, depth aovNormalDepth[_idx] = make_float4(orienting_normal.x, orienting_normal.y, orienting_normal.z, pos.w); // texture color. auto texcolor = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1.0f)); #if 0 aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.meshid); #else aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.mtrlid); #endif // For exporting separated albedo. shMtrls[threadIdx.x].albedoMap = -1; } // Implicit conection to light. if (shMtrls[threadIdx.x].attrib.isEmissive) { if (!isBackfacing) { float weight = 1.0f; if (bounce > 0 && !paths->attrib[idx].isSingular) { auto cosLight = dot(orienting_normal, -ray.dir); auto dist2 = aten::squared_length(rec.p - ray.org); if (cosLight >= 0) { auto pdfLight = 1 / rec.area; // Convert pdf area to sradian. // http://www.slideshare.net/h013/edubpt-v100 // p31 - p35 pdfLight = pdfLight * dist2 / cosLight; weight = paths->throughput[idx].pdfb / (pdfLight + paths->throughput[idx].pdfb); } } auto contrib = paths->throughput[idx].throughput * weight * shMtrls[threadIdx.x].baseColor; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); } // When ray hit the light, tracing will finish. paths->attrib[idx].isTerminate = true; return; } if (!shMtrls[threadIdx.x].attrib.isTranslucent && isBackfacing) { orienting_normal = -orienting_normal; } // Apply normal map. int normalMap = shMtrls[threadIdx.x].normalMap; if (shMtrls[threadIdx.x].type == aten::MaterialType::Layer) { // NormalMap . auto* topmtrl = &ctxt.mtrls[shMtrls[threadIdx.x].layer[0]]; normalMap = (int)(topmtrl->normalMap >= 0 ? ctxt.textures[topmtrl->normalMap] : -1); } AT_NAME::applyNormalMap(normalMap, orienting_normal, orienting_normal, rec.u, rec.v); auto albedo = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1), bounce); #if 1 #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].isActive = false; } // Explicit conection to light. 
if (!(shMtrls[threadIdx.x].attrib.isSingular || shMtrls[threadIdx.x].attrib.isTranslucent)) { auto shadowRayOrg = rec.p + AT_MATH_EPSILON * orienting_normal; for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { real lightSelectPdf = 1; aten::LightSampleResult sampleres; // TODO // Importance sampling. int lightidx = aten::cmpMin<int>(paths->sampler[idx].nextSample() * lightnum, lightnum - 1); lightSelectPdf = 1.0f / lightnum; aten::LightParameter light; light.pos = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 0]; light.dir = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 1]; light.le = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 2]; light.v0 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 3]; light.v1 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 4]; light.v2 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 5]; //auto light = ctxt.lights[lightidx]; sampleLight(&sampleres, &ctxt, &light, rec.p, orienting_normal, &paths->sampler[idx], bounce); const auto& posLight = sampleres.pos; const auto& nmlLight = sampleres.nml; real pdfLight = sampleres.pdf; auto dirToLight = normalize(sampleres.dir); auto distToLight = length(posLight - rec.p); auto tmp = rec.p + dirToLight - shadowRayOrg; auto shadowRayDir = normalize(tmp); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].isActive = true; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].rayorg = shadowRayOrg; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].raydir = shadowRayDir; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].targetLightId = lightidx; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].distToLight = distToLight; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = aten::vec3(0); { auto cosShadow = dot(orienting_normal, dirToLight); real pdfb = samplePDF(&ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, dirToLight, rec.u, rec.v); auto bsdf = sampleBSDF(&ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, dirToLight, rec.u, rec.v, albedo); bsdf *= paths->throughput[idx].throughput; // Get light color. auto emit = sampleres.finalColor; if (light.attrib.isSingular || light.attrib.isInfinite) { if (pdfLight > real(0) && cosShadow >= 0) { // TODO // . // singular light finalColor . // inifinite light pdfLight. // pdfLight. auto misW = pdfLight / (pdfb + pdfLight); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = (misW * bsdf * emit * cosShadow / pdfLight) / lightSelectPdf / (float)idaten::SVGFPathTracing::ShadowRayNum; } } else { auto cosLight = dot(nmlLight, -dirToLight); if (cosShadow >= 0 && cosLight >= 0) { auto dist2 = aten::squared_length(sampleres.dir); auto G = cosShadow * cosLight / dist2; if (pdfb > real(0) && pdfLight > real(0)) { // Convert pdf from steradian to area. 
// http://www.slideshare.net/h013/edubpt-v100 // p31 - p35 pdfb = pdfb * cosLight / dist2; auto misW = pdfLight / (pdfb + pdfLight); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = (misW * (bsdf * emit * G) / pdfLight) / lightSelectPdf / (float)idaten::SVGFPathTracing::ShadowRayNum;; } } } } } } #endif real russianProb = real(1); if (bounce > rrBounce) { auto t = normalize(paths->throughput[idx].throughput); auto p = aten::cmpMax(t.r, aten::cmpMax(t.g, t.b)); russianProb = paths->sampler[idx].nextSample(); if (russianProb >= p) { //shPaths[threadIdx.x].contrib = aten::vec3(0); paths->attrib[idx].isTerminate = true; } else { russianProb = max(p, 0.01f); } } AT_NAME::MaterialSampling sampling; sampleMaterial( &sampling, &ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, rec.normal, &paths->sampler[idx], rec.u, rec.v, albedo); auto nextDir = normalize(sampling.dir); auto pdfb = sampling.pdf; auto bsdf = sampling.bsdf; real c = 1; if (!shMtrls[threadIdx.x].attrib.isSingular) { // TODO // AMDabs.... c = aten::abs(dot(orienting_normal, nextDir)); //c = dot(orienting_normal, nextDir); } if (pdfb > 0 && c > 0) { paths->throughput[idx].throughput *= bsdf * c / pdfb; paths->throughput[idx].throughput /= russianProb; } else { paths->attrib[idx].isTerminate = true; } // Make next ray. rays[idx] = aten::ray(rec.p, nextDir); paths->throughput[idx].pdfb = pdfb; paths->attrib[idx].isSingular = shMtrls[threadIdx.x].attrib.isSingular; paths->attrib[idx].mtrlType = shMtrls[threadIdx.x].type; #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { shadowRays[idx * idaten::SVGFPathTracing::ShadowRayNum + i] = shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i]; } } __global__ void hitShadowRay( int bounce, idaten::SVGFPathTracing::Path* paths, int* hitindices, int* hitnum, const idaten::SVGFPathTracing::ShadowRay* __restrict__ shadowRays, const aten::GeomParameter* __restrict__ shapes, int geomnum, aten::MaterialParameter* mtrls, const aten::LightParameter* __restrict__ lights, int lightnum, hipTextureObject_t* nodes, const aten::PrimitiveParamter* __restrict__ prims, hipTextureObject_t vtxPos, const aten::mat4* __restrict__ matrices) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= *hitnum) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.mtrls = mtrls; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } idx = hitindices[idx]; #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { const auto& shadowRay = shadowRays[idx * idaten::SVGFPathTracing::ShadowRayNum + i]; if (!shadowRay.isActive) { continue; } auto targetLightId = shadowRay.targetLightId; auto distToLight = shadowRay.distToLight; auto light = ctxt.lights[targetLightId]; auto lightobj = (light.objid >= 0 ? &ctxt.shapes[light.objid] : nullptr); real distHitObjToRayOrg = AT_MATH_INF; // Ray aim to the area light. // So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light. 
const aten::GeomParameter* hitobj = lightobj; aten::Intersection isectTmp; bool isHit = false; aten::ray r(shadowRay.rayorg, shadowRay.raydir); // TODO bool enableLod = (bounce >= 2); isHit = intersectCloser(&ctxt, r, &isectTmp, distToLight - AT_MATH_EPSILON, enableLod); if (isHit) { hitobj = &ctxt.shapes[isectTmp.objid]; } isHit = AT_NAME::scene::hitLight( isHit, light.attrib, lightobj, distToLight, distHitObjToRayOrg, isectTmp.t, hitobj); if (isHit) { auto contrib = shadowRay.lightcontrib; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); } } } __global__ void gather( idaten::TileDomain tileDomain, hipSurfaceObject_t dst, float4* aovColorVariance, float4* aovMomentTemporalWeight, const idaten::SVGFPathTracing::Path* __restrict__ paths, float4* contribs, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } auto idx = getIdx(ix, iy, tileDomain.w); float4 c = paths->contrib[idx].v; int sample = c.w; float3 contrib = make_float3(c.x, c.y, c.z) / sample; //contrib.w = sample; float lum = AT_NAME::color::luminance(contrib.x, contrib.y, contrib.z); ix += tileDomain.x; iy += tileDomain.y; idx = getIdx(ix, iy, width); aovMomentTemporalWeight[idx].x += lum * lum; aovMomentTemporalWeight[idx].y += lum; aovMomentTemporalWeight[idx].z += 1; aovColorVariance[idx] = make_float4(contrib.x, contrib.y, contrib.z, aovColorVariance[idx].w); contribs[idx] = c; #if 0 auto n = aovs[idx].moments.w; auto m = aovs[idx].moments / n; auto var = m.x - m.y * m.y; surf2Dwrite( make_float4(var, var, var, 1), dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); #else if (dst) { surf2Dwrite( make_float4(contrib, 0), dst, ix * sizeof(float4), iy, hipBoundaryModeTrap); } #endif } namespace idaten { void SVGFPathTracing::onGenPath( int sample, int maxSamples, int seed, hipTextureObject_t texVtxPos, hipTextureObject_t texVtxNml) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); bool isFillAOV = m_mode == Mode::AOVar; genPath << <grid, block, 0, m_stream >> > ( m_tileDomain, isFillAOV, m_paths.ptr(), m_rays.ptr(), m_tileDomain.w, m_tileDomain.h, sample, maxSamples, m_frame, m_cam.ptr(), m_sobolMatrices.ptr(), m_random.ptr()); checkCudaKernel(genPath); } void SVGFPathTracing::onHitTest( int width, int height, int bounce, hipTextureObject_t texVtxPos) { if (bounce == 0 && m_canSSRTHitTest) { onScreenSpaceHitTest(width, height, bounce, texVtxPos); } else { #ifdef ENABLE_PERSISTENT_THREAD hitTest << <NUM_BLOCK, dim3(WARP_SIZE, NUM_WARP_PER_BLOCK), 0, m_stream >> > ( #else dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); hitTest << <grid, block >> > ( #endif //hitTest << <1, 1 >> > ( m_tileDomain, m_paths.ptr(), m_isects.ptr(), m_rays.ptr(), m_hitbools.ptr(), width, height, m_shapeparam.ptr(), m_shapeparam.num(), m_lightparam.ptr(), m_lightparam.num(), m_nodetex.ptr(), m_primparams.ptr(), texVtxPos, m_mtxparams.ptr(), bounce, m_hitDistLimit); checkCudaKernel(hitTest); } } void SVGFPathTracing::onShadeMiss( int width, int height, int bounce, int offsetX/*= -1*/, int offsetY/*= -1*/) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); offsetX = offsetX < 0 ? m_tileDomain.x : offsetX; offsetY = offsetY < 0 ? 
m_tileDomain.y : offsetY; if (m_envmapRsc.idx >= 0) { shadeMissWithEnvmap << <grid, block, 0, m_stream >> > ( m_tileDomain, offsetX, offsetY, bounce, m_cam.ptr(), m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_tex.ptr(), m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer, m_paths.ptr(), m_rays.ptr(), width, height); } else { shadeMiss << <grid, block, 0, m_stream >> > ( m_tileDomain, bounce, m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_paths.ptr(), width, height); } checkCudaKernel(shadeMiss); } void SVGFPathTracing::onShade( hipSurfaceObject_t outputSurf, int width, int height, int bounce, int rrBounce, hipTextureObject_t texVtxPos, hipTextureObject_t texVtxNml) { m_mtxW2V.lookat( m_camParam.origin, m_camParam.center, m_camParam.up); m_mtxV2C.perspective( m_camParam.znear, m_camParam.zfar, m_camParam.vfov, m_camParam.aspect); m_mtxC2V = m_mtxV2C; m_mtxC2V.invert(); m_mtxV2W = m_mtxW2V; m_mtxV2W.invert(); aten::mat4 mtxW2C = m_mtxV2C * m_mtxW2V; dim3 blockPerGrid(((m_tileDomain.w * m_tileDomain.h) + 64 - 1) / 64); dim3 threadPerBlock(64); auto& hitcount = m_compaction.getCount(); int curaov = getCurAovs(); shade << <blockPerGrid, threadPerBlock, 0, m_stream >> > ( m_tileDomain, m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), mtxW2C, width, height, m_paths.ptr(), m_hitidx.ptr(), hitcount.ptr(), m_isects.ptr(), m_rays.ptr(), m_frame, bounce, rrBounce, m_shapeparam.ptr(), m_shapeparam.num(), m_mtrlparam.ptr(), m_lightparam.ptr(), m_lightparam.num(), m_primparams.ptr(), texVtxPos, texVtxNml, m_mtxparams.ptr(), m_tex.ptr(), m_random.ptr(), m_shadowRays.ptr()); checkCudaKernel(shade); hitShadowRay << <blockPerGrid, threadPerBlock, 0, m_stream >> > ( bounce, m_paths.ptr(), m_hitidx.ptr(), hitcount.ptr(), m_shadowRays.ptr(), m_shapeparam.ptr(), m_shapeparam.num(), m_mtrlparam.ptr(), m_lightparam.ptr(), m_lightparam.num(), m_nodetex.ptr(), m_primparams.ptr(), texVtxPos, m_mtxparams.ptr()); checkCudaKernel(hitShadowRay); } void SVGFPathTracing::onGather( hipSurfaceObject_t outputSurf, int width, int height, int maxSamples) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); gather << <grid, block, 0, m_stream >> > ( m_tileDomain, outputSurf, m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_paths.ptr(), m_tmpBuf.ptr(), width, height); checkCudaKernel(gather); } }
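// --- The persistent-thread pattern used by hitTest above ---
// When ENABLE_PERSISTENT_THREAD is defined, onHitTest launches a fixed grid
// (NUM_BLOCK blocks of WARP_SIZE x NUM_WARP_PER_BLOCK threads) and each warp
// repeatedly claims a batch of WARP_SIZE ray indices from the global counter
// g_headDev with atomicAdd until the tile is exhausted.  The stripped-down
// sketch below reproduces only that scheduling loop; it broadcasts the batch
// start with a warp shuffle where the original uses a volatile per-warp shared
// variable, and process(idx) stands in for the actual intersection work.
#define PT_WARP_SIZE 32   // matches WARP_SIZE above

__device__ unsigned int g_head = 0;   // reset to 0 before each launch

// Launch with a fixed grid, e.g. <<<NUM_BLOCK, dim3(32, NUM_WARP_PER_BLOCK)>>>.
__global__ void persistentLoop(int workCount)
{
    while (true) {
        unsigned int head = 0;
        if (threadIdx.x == 0) {
            // Lane 0 claims the next batch of PT_WARP_SIZE work items.
            head = atomicAdd(&g_head, PT_WARP_SIZE);
        }
        // Broadcast the batch start to every lane of the warp.
        head = __shfl_sync(0xffffffffu, head, 0);
        if (head >= (unsigned int)workCount) {
            return;                    // uniform exit: the work queue is drained
        }
        unsigned int idx = head + threadIdx.x;
        if (idx < (unsigned int)workCount) {
            // process(idx);  // e.g. intersect rays[idx], as hitTest does
        }
    }
}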
a5d37fd256f81e31876a5373a9efe7166619dedb.cu
#include "svgf/svgf.h" #include "kernel/StreamCompaction.h" #include "kernel/context.cuh" #include "kernel/light.cuh" #include "kernel/material.cuh" #include "kernel/intersect.cuh" #include "kernel/accelerator.cuh" #include "kernel/pt_common.h" #include "cuda/cudadefs.h" #include "cuda/helper_math.h" #include "cuda/cudautil.h" #include "cuda/cudamemory.h" #include "aten4idaten.h" #define ENABLE_PERSISTENT_THREAD __global__ void genPath( idaten::TileDomain tileDomain, bool isFillAOV, idaten::SVGFPathTracing::Path* paths, aten::ray* rays, int width, int height, int sample, int maxSamples, unsigned int frame, const aten::CameraParameter* __restrict__ camera, const void* samplerValues, const unsigned int* __restrict__ random) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= width || iy >= height) { return; } const auto idx = getIdx(ix, iy, width); paths->attrib[idx].isHit = false; if (paths->attrib[idx].isKill) { paths->attrib[idx].isTerminate = true; return; } #if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL auto scramble = random[idx] * 0x1fe3434f; paths->sampler[idx].init(frame, 0, scramble, samplerValues); #elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ auto rnd = random[idx]; auto scramble = rnd * 0x1fe3434f * ((frame + 133 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM)); paths->sampler[idx].init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 0, scramble); #endif float r1 = paths->sampler[idx].nextSample(); float r2 = paths->sampler[idx].nextSample(); if (isFillAOV) { r1 = r2 = 0.5f; } ix += tileDomain.x; iy += tileDomain.y; float s = (ix + r1) / (float)(camera->width); float t = (iy + r2) / (float)(camera->height); AT_NAME::CameraSampleResult camsample; AT_NAME::PinholeCamera::sample(&camsample, camera, s, t); rays[idx] = camsample.r; paths->throughput[idx].throughput = aten::vec3(1); paths->throughput[idx].pdfb = 0.0f; paths->attrib[idx].isTerminate = false; paths->attrib[idx].isSingular = false; paths->contrib[idx].samples += 1; // Accumulate value, so do not reset. //path.contrib = aten::vec3(0); } // NOTE // persistent thread. // https://gist.github.com/guozhou/b972bb42bbc5cba1f062#file-persistent-cpp-L15 // NOTE // compute capability 6.0 // http://homepages.math.uic.edu/~jan/mcs572/performance_considerations.pdf // p3 #define NUM_SM 64 // no. of streaming multiprocessors #define NUM_WARP_PER_SM 64 // maximum no. of resident warps per SM #define NUM_BLOCK_PER_SM 32 // maximum no. 
of resident blocks per SM #define NUM_BLOCK (NUM_SM * NUM_BLOCK_PER_SM) #define NUM_WARP_PER_BLOCK (NUM_WARP_PER_SM / NUM_BLOCK_PER_SM) #define WARP_SIZE 32 __device__ unsigned int g_headDev = 0; __global__ void hitTest( idaten::TileDomain tileDomain, idaten::SVGFPathTracing::Path* paths, aten::Intersection* isects, aten::ray* rays, int* hitbools, int width, int height, const aten::GeomParameter* __restrict__ shapes, int geomnum, const aten::LightParameter* __restrict__ lights, int lightnum, cudaTextureObject_t* nodes, const aten::PrimitiveParamter* __restrict__ prims, cudaTextureObject_t vtxPos, aten::mat4* matrices, int bounce, float hitDistLimit) { #ifdef ENABLE_PERSISTENT_THREAD // warp-wise head index of tasks in a block __shared__ volatile unsigned int headBlock[NUM_WARP_PER_BLOCK]; volatile unsigned int& headWarp = headBlock[threadIdx.y]; if (blockIdx.x == 0 && threadIdx.x == 0) { g_headDev = 0; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } do { // let lane 0 fetch [wh, wh + WARP_SIZE - 1] for a warp if (threadIdx.x == 0) { headWarp = atomicAdd(&g_headDev, WARP_SIZE); } // task index per thread in a warp unsigned int idx = headWarp + threadIdx.x; if (idx >= tileDomain.w * tileDomain.h) { return; } paths->attrib[idx].isHit = false; hitbools[idx] = 0; if (paths->attrib[idx].isTerminate) { continue; } aten::Intersection isect; float t_max = AT_MATH_INF; if (bounce >= 1 && !paths->attrib[idx].isSingular) { t_max = hitDistLimit; } // TODO // 近距離でVoxelにすると品質が落ちる. // しかも同じオブジェクト間だとそれが起こりやすい. //bool enableLod = (bounce >= 2); bool enableLod = false; int depth = 9; bool isHit = intersectClosest(&ctxt, rays[idx], &isect, t_max, enableLod, depth); #if 0 isects[idx].t = isect.t; isects[idx].objid = isect.objid; isects[idx].mtrlid = isect.mtrlid; isects[idx].meshid = isect.meshid; isects[idx].primid = isect.primid; isects[idx].a = isect.a; isects[idx].b = isect.b; #else isects[idx] = isect; #endif if (bounce >= 1 && !paths->attrib[idx].isSingular && isect.t > hitDistLimit) { isHit = false; paths->attrib[idx].isTerminate = true; } paths->attrib[idx].isHit = isHit; hitbools[idx] = isHit ? 1 : 0; } while (true); #else const auto ix = blockIdx.x * blockDim.x + threadIdx.x; const auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); paths->attrib[idx].isHit = false; hitbools[idx] = 0; if (paths->attrib[idx].isTerminate) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } aten::Intersection isect; float t_max = AT_MATH_INF; if (bounce >= 1 && !paths->attrib[idx].isSingular) { t_max = hitDistLimit; } bool isHit = intersectClosest(&ctxt, rays[idx], &isect, t_max); #if 0 isects[idx].t = isect.t; isects[idx].objid = isect.objid; isects[idx].mtrlid = isect.mtrlid; isects[idx].meshid = isect.meshid; isects[idx].area = isect.area; isects[idx].primid = isect.primid; isects[idx].a = isect.a; isects[idx].b = isect.b; #else isects[idx] = isect; #endif if (bounce >= 1 && !paths->attrib[idx].isSingular && isect.t > hitDistLimit) { isHit = false; } paths->attrib[idx].isHit = isHit; hitbools[idx] = isHit ? 
1 : 0; #endif } __global__ void shadeMiss( idaten::TileDomain tileDomain, int bounce, float4* aovNormalDepth, float4* aovTexclrMeshid, idaten::SVGFPathTracing::Path* paths, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); if (!paths->attrib[idx].isTerminate && !paths->attrib[idx].isHit) { // TODO auto bg = aten::vec3(0); if (bounce == 0) { paths->attrib[idx].isKill = true; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // Export bg color to albedo buffer. aovTexclrMeshid[_idx] = make_float4(bg.x, bg.y, bg.z, -1); aovNormalDepth[_idx].w = -1; // For exporting separated albedo. bg = aten::vec3(1, 1, 1); } auto contrib = paths->throughput[idx].throughput * bg; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); paths->attrib[idx].isTerminate = true; } } __global__ void shadeMissWithEnvmap( idaten::TileDomain tileDomain, int offsetX, int offsetY, int bounce, const aten::CameraParameter* __restrict__ camera, float4* aovNormalDepth, float4* aovTexclrMeshid, cudaTextureObject_t* textures, int envmapIdx, real envmapAvgIllum, real envmapMultiplyer, idaten::SVGFPathTracing::Path* paths, const aten::ray* __restrict__ rays, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } const auto idx = getIdx(ix, iy, tileDomain.w); if (!paths->attrib[idx].isTerminate && !paths->attrib[idx].isHit) { aten::vec3 dir = rays[idx].dir; if (bounce == 0) { // Suppress jittering envrinment map. // So, re-sample ray without random. // TODO // More efficient way... float s = (ix + offsetX) / (float)(width); float t = (iy + offsetY) / (float)(height); AT_NAME::CameraSampleResult camsample; AT_NAME::PinholeCamera::sample(&camsample, camera, s, t); dir = camsample.r.dir; } auto uv = AT_NAME::envmap::convertDirectionToUV(dir); auto bg = tex2D<float4>(textures[envmapIdx], uv.x, uv.y); auto emit = aten::vec3(bg.x, bg.y, bg.z); float misW = 1.0f; if (bounce == 0 || (bounce == 1 && paths->attrib[idx].isSingular)) { paths->attrib[idx].isKill = true; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // Export envmap to albedo buffer. aovTexclrMeshid[_idx] = make_float4(emit.x, emit.y, emit.z, -1); aovNormalDepth[_idx].w = -1; // For exporting separated albedo. 
emit = aten::vec3(1, 1, 1); } else { auto pdfLight = AT_NAME::ImageBasedLight::samplePdf(emit, envmapAvgIllum); misW = paths->throughput[idx].pdfb / (pdfLight + paths->throughput[idx].pdfb); emit *= envmapMultiplyer; } auto contrib = paths->throughput[idx].throughput * misW * emit; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); paths->attrib[idx].isTerminate = true; } } __global__ void shade( idaten::TileDomain tileDomain, float4* aovNormalDepth, float4* aovTexclrMeshid, aten::mat4 mtxW2C, int width, int height, idaten::SVGFPathTracing::Path* paths, const int* __restrict__ hitindices, int* hitnum, const aten::Intersection* __restrict__ isects, aten::ray* rays, int frame, int bounce, int rrBounce, const aten::GeomParameter* __restrict__ shapes, int geomnum, const aten::MaterialParameter* __restrict__ mtrls, const aten::LightParameter* __restrict__ lights, int lightnum, const aten::PrimitiveParamter* __restrict__ prims, cudaTextureObject_t vtxPos, cudaTextureObject_t vtxNml, const aten::mat4* __restrict__ matrices, cudaTextureObject_t* textures, unsigned int* random, idaten::SVGFPathTracing::ShadowRay* shadowRays) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= *hitnum) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.mtrls = mtrls; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.vtxNml = vtxNml; ctxt.matrices = matrices; ctxt.textures = textures; } idx = hitindices[idx]; __shared__ idaten::SVGFPathTracing::ShadowRay shShadowRays[64 * idaten::SVGFPathTracing::ShadowRayNum]; __shared__ aten::MaterialParameter shMtrls[64]; const auto ray = rays[idx]; #if IDATEN_SAMPLER == IDATEN_SAMPLER_SOBOL auto scramble = random[idx] * 0x1fe3434f; paths->sampler[idx].init(frame, 4 + bounce * 300, scramble); #elif IDATEN_SAMPLER == IDATEN_SAMPLER_CMJ auto rnd = random[idx]; auto scramble = rnd * 0x1fe3434f * ((frame + 331 * rnd) / (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM)); paths->sampler[idx].init(frame % (aten::CMJ::CMJ_DIM * aten::CMJ::CMJ_DIM), 4 + bounce * 300, scramble); #endif aten::hitrecord rec; const auto& isect = isects[idx]; auto obj = &ctxt.shapes[isect.objid]; evalHitResult(&ctxt, obj, ray, &rec, &isect); bool isBackfacing = dot(rec.normal, -ray.dir) < 0.0f; // 交差位置の法線. // 物体からのレイの入出を考慮. aten::vec3 orienting_normal = rec.normal; if (rec.mtrlid >= 0) { shMtrls[threadIdx.x] = ctxt.mtrls[rec.mtrlid]; #if 1 if (rec.isVoxel) { // Replace to lambert. const auto& albedo = ctxt.mtrls[rec.mtrlid].baseColor; shMtrls[threadIdx.x] = aten::MaterialParameter(aten::MaterialType::Lambert, MaterialAttributeLambert); shMtrls[threadIdx.x].baseColor = albedo; } #endif if (shMtrls[threadIdx.x].type != aten::MaterialType::Layer) { shMtrls[threadIdx.x].albedoMap = (int)(shMtrls[threadIdx.x].albedoMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].albedoMap] : -1); shMtrls[threadIdx.x].normalMap = (int)(shMtrls[threadIdx.x].normalMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].normalMap] : -1); shMtrls[threadIdx.x].roughnessMap = (int)(shMtrls[threadIdx.x].roughnessMap >= 0 ? ctxt.textures[shMtrls[threadIdx.x].roughnessMap] : -1); } } else { // TODO shMtrls[threadIdx.x] = aten::MaterialParameter(aten::MaterialType::Lambert, MaterialAttributeLambert); shMtrls[threadIdx.x].baseColor = aten::vec3(1.0f); } // Render AOVs. // NOTE // 厳密に法線をAOVに保持するなら、法線マップ適用後するべき. // しかし、temporal reprojection、atrousなどのフィルタ適用時に法線を参照する際に、法線マップが細かすぎてはじかれてしまうことがある. // それにより、フィルタがおもったようにかからずフィルタの品質が下がってしまう問題が発生する. 
if (bounce == 0) { int ix = idx % tileDomain.w; int iy = idx / tileDomain.w; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // World coordinate to Clip coordinate. aten::vec4 pos = aten::vec4(rec.p, 1); pos = mtxW2C.apply(pos); // normal, depth aovNormalDepth[_idx] = make_float4(orienting_normal.x, orienting_normal.y, orienting_normal.z, pos.w); // texture color, meshid. auto texcolor = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1.0f)); #if 0 aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.meshid); #else aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.mtrlid); #endif // For exporting separated albedo. shMtrls[threadIdx.x].albedoMap = -1; } // TODO // How to deal Refraction? else if (bounce == 1 && paths->attrib[idx].mtrlType == aten::MaterialType::Specular) { int ix = idx % tileDomain.w; int iy = idx / tileDomain.w; ix += tileDomain.x; iy += tileDomain.y; const auto _idx = getIdx(ix, iy, width); // World coordinate to Clip coordinate. aten::vec4 pos = aten::vec4(rec.p, 1); pos = mtxW2C.apply(pos); // normal, depth aovNormalDepth[_idx] = make_float4(orienting_normal.x, orienting_normal.y, orienting_normal.z, pos.w); // texture color. auto texcolor = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1.0f)); #if 0 aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.meshid); #else aovTexclrMeshid[_idx] = make_float4(texcolor.x, texcolor.y, texcolor.z, isect.mtrlid); #endif // For exporting separated albedo. shMtrls[threadIdx.x].albedoMap = -1; } // Implicit conection to light. if (shMtrls[threadIdx.x].attrib.isEmissive) { if (!isBackfacing) { float weight = 1.0f; if (bounce > 0 && !paths->attrib[idx].isSingular) { auto cosLight = dot(orienting_normal, -ray.dir); auto dist2 = aten::squared_length(rec.p - ray.org); if (cosLight >= 0) { auto pdfLight = 1 / rec.area; // Convert pdf area to sradian. // http://www.slideshare.net/h013/edubpt-v100 // p31 - p35 pdfLight = pdfLight * dist2 / cosLight; weight = paths->throughput[idx].pdfb / (pdfLight + paths->throughput[idx].pdfb); } } auto contrib = paths->throughput[idx].throughput * weight * shMtrls[threadIdx.x].baseColor; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); } // When ray hit the light, tracing will finish. paths->attrib[idx].isTerminate = true; return; } if (!shMtrls[threadIdx.x].attrib.isTranslucent && isBackfacing) { orienting_normal = -orienting_normal; } // Apply normal map. int normalMap = shMtrls[threadIdx.x].normalMap; if (shMtrls[threadIdx.x].type == aten::MaterialType::Layer) { // 最表層の NormalMap を適用. auto* topmtrl = &ctxt.mtrls[shMtrls[threadIdx.x].layer[0]]; normalMap = (int)(topmtrl->normalMap >= 0 ? ctxt.textures[topmtrl->normalMap] : -1); } AT_NAME::applyNormalMap(normalMap, orienting_normal, orienting_normal, rec.u, rec.v); auto albedo = AT_NAME::sampleTexture(shMtrls[threadIdx.x].albedoMap, rec.u, rec.v, aten::vec3(1), bounce); #if 1 #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].isActive = false; } // Explicit conection to light. 
if (!(shMtrls[threadIdx.x].attrib.isSingular || shMtrls[threadIdx.x].attrib.isTranslucent)) { auto shadowRayOrg = rec.p + AT_MATH_EPSILON * orienting_normal; for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { real lightSelectPdf = 1; aten::LightSampleResult sampleres; // TODO // Importance sampling. int lightidx = aten::cmpMin<int>(paths->sampler[idx].nextSample() * lightnum, lightnum - 1); lightSelectPdf = 1.0f / lightnum; aten::LightParameter light; light.pos = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 0]; light.dir = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 1]; light.le = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 2]; light.v0 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 3]; light.v1 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 4]; light.v2 = ((aten::vec4*)ctxt.lights)[lightidx * aten::LightParameter_float4_size + 5]; //auto light = ctxt.lights[lightidx]; sampleLight(&sampleres, &ctxt, &light, rec.p, orienting_normal, &paths->sampler[idx], bounce); const auto& posLight = sampleres.pos; const auto& nmlLight = sampleres.nml; real pdfLight = sampleres.pdf; auto dirToLight = normalize(sampleres.dir); auto distToLight = length(posLight - rec.p); auto tmp = rec.p + dirToLight - shadowRayOrg; auto shadowRayDir = normalize(tmp); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].isActive = true; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].rayorg = shadowRayOrg; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].raydir = shadowRayDir; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].targetLightId = lightidx; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].distToLight = distToLight; shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = aten::vec3(0); { auto cosShadow = dot(orienting_normal, dirToLight); real pdfb = samplePDF(&ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, dirToLight, rec.u, rec.v); auto bsdf = sampleBSDF(&ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, dirToLight, rec.u, rec.v, albedo); bsdf *= paths->throughput[idx].throughput; // Get light color. auto emit = sampleres.finalColor; if (light.attrib.isSingular || light.attrib.isInfinite) { if (pdfLight > real(0) && cosShadow >= 0) { // TODO // ジオメトリタームの扱いについて. // singular light の場合は、finalColor に距離の除算が含まれている. // inifinite light の場合は、無限遠方になり、pdfLightに含まれる距離成分と打ち消しあう?. // (打ち消しあうので、pdfLightには距離成分は含んでいない). auto misW = pdfLight / (pdfb + pdfLight); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = (misW * bsdf * emit * cosShadow / pdfLight) / lightSelectPdf / (float)idaten::SVGFPathTracing::ShadowRayNum; } } else { auto cosLight = dot(nmlLight, -dirToLight); if (cosShadow >= 0 && cosLight >= 0) { auto dist2 = aten::squared_length(sampleres.dir); auto G = cosShadow * cosLight / dist2; if (pdfb > real(0) && pdfLight > real(0)) { // Convert pdf from steradian to area. 
// http://www.slideshare.net/h013/edubpt-v100 // p31 - p35 pdfb = pdfb * cosLight / dist2; auto misW = pdfLight / (pdfb + pdfLight); shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i].lightcontrib = (misW * (bsdf * emit * G) / pdfLight) / lightSelectPdf / (float)idaten::SVGFPathTracing::ShadowRayNum;; } } } } } } #endif real russianProb = real(1); if (bounce > rrBounce) { auto t = normalize(paths->throughput[idx].throughput); auto p = aten::cmpMax(t.r, aten::cmpMax(t.g, t.b)); russianProb = paths->sampler[idx].nextSample(); if (russianProb >= p) { //shPaths[threadIdx.x].contrib = aten::vec3(0); paths->attrib[idx].isTerminate = true; } else { russianProb = max(p, 0.01f); } } AT_NAME::MaterialSampling sampling; sampleMaterial( &sampling, &ctxt, &shMtrls[threadIdx.x], orienting_normal, ray.dir, rec.normal, &paths->sampler[idx], rec.u, rec.v, albedo); auto nextDir = normalize(sampling.dir); auto pdfb = sampling.pdf; auto bsdf = sampling.bsdf; real c = 1; if (!shMtrls[threadIdx.x].attrib.isSingular) { // TODO // AMDのはabsしているが.... c = aten::abs(dot(orienting_normal, nextDir)); //c = dot(orienting_normal, nextDir); } if (pdfb > 0 && c > 0) { paths->throughput[idx].throughput *= bsdf * c / pdfb; paths->throughput[idx].throughput /= russianProb; } else { paths->attrib[idx].isTerminate = true; } // Make next ray. rays[idx] = aten::ray(rec.p, nextDir); paths->throughput[idx].pdfb = pdfb; paths->attrib[idx].isSingular = shMtrls[threadIdx.x].attrib.isSingular; paths->attrib[idx].mtrlType = shMtrls[threadIdx.x].type; #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { shadowRays[idx * idaten::SVGFPathTracing::ShadowRayNum + i] = shShadowRays[threadIdx.x * idaten::SVGFPathTracing::ShadowRayNum + i]; } } __global__ void hitShadowRay( int bounce, idaten::SVGFPathTracing::Path* paths, int* hitindices, int* hitnum, const idaten::SVGFPathTracing::ShadowRay* __restrict__ shadowRays, const aten::GeomParameter* __restrict__ shapes, int geomnum, aten::MaterialParameter* mtrls, const aten::LightParameter* __restrict__ lights, int lightnum, cudaTextureObject_t* nodes, const aten::PrimitiveParamter* __restrict__ prims, cudaTextureObject_t vtxPos, const aten::mat4* __restrict__ matrices) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= *hitnum) { return; } Context ctxt; { ctxt.geomnum = geomnum; ctxt.shapes = shapes; ctxt.mtrls = mtrls; ctxt.lightnum = lightnum; ctxt.lights = lights; ctxt.nodes = nodes; ctxt.prims = prims; ctxt.vtxPos = vtxPos; ctxt.matrices = matrices; } idx = hitindices[idx]; #pragma unroll for (int i = 0; i < idaten::SVGFPathTracing::ShadowRayNum; i++) { const auto& shadowRay = shadowRays[idx * idaten::SVGFPathTracing::ShadowRayNum + i]; if (!shadowRay.isActive) { continue; } auto targetLightId = shadowRay.targetLightId; auto distToLight = shadowRay.distToLight; auto light = ctxt.lights[targetLightId]; auto lightobj = (light.objid >= 0 ? &ctxt.shapes[light.objid] : nullptr); real distHitObjToRayOrg = AT_MATH_INF; // Ray aim to the area light. // So, if ray doesn't hit anything in intersectCloserBVH, ray hit the area light. 
const aten::GeomParameter* hitobj = lightobj; aten::Intersection isectTmp; bool isHit = false; aten::ray r(shadowRay.rayorg, shadowRay.raydir); // TODO bool enableLod = (bounce >= 2); isHit = intersectCloser(&ctxt, r, &isectTmp, distToLight - AT_MATH_EPSILON, enableLod); if (isHit) { hitobj = &ctxt.shapes[isectTmp.objid]; } isHit = AT_NAME::scene::hitLight( isHit, light.attrib, lightobj, distToLight, distHitObjToRayOrg, isectTmp.t, hitobj); if (isHit) { auto contrib = shadowRay.lightcontrib; paths->contrib[idx].contrib += make_float3(contrib.x, contrib.y, contrib.z); } } } __global__ void gather( idaten::TileDomain tileDomain, cudaSurfaceObject_t dst, float4* aovColorVariance, float4* aovMomentTemporalWeight, const idaten::SVGFPathTracing::Path* __restrict__ paths, float4* contribs, int width, int height) { auto ix = blockIdx.x * blockDim.x + threadIdx.x; auto iy = blockIdx.y * blockDim.y + threadIdx.y; if (ix >= tileDomain.w || iy >= tileDomain.h) { return; } auto idx = getIdx(ix, iy, tileDomain.w); float4 c = paths->contrib[idx].v; int sample = c.w; float3 contrib = make_float3(c.x, c.y, c.z) / sample; //contrib.w = sample; float lum = AT_NAME::color::luminance(contrib.x, contrib.y, contrib.z); ix += tileDomain.x; iy += tileDomain.y; idx = getIdx(ix, iy, width); aovMomentTemporalWeight[idx].x += lum * lum; aovMomentTemporalWeight[idx].y += lum; aovMomentTemporalWeight[idx].z += 1; aovColorVariance[idx] = make_float4(contrib.x, contrib.y, contrib.z, aovColorVariance[idx].w); contribs[idx] = c; #if 0 auto n = aovs[idx].moments.w; auto m = aovs[idx].moments / n; auto var = m.x - m.y * m.y; surf2Dwrite( make_float4(var, var, var, 1), dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); #else if (dst) { surf2Dwrite( make_float4(contrib, 0), dst, ix * sizeof(float4), iy, cudaBoundaryModeTrap); } #endif } namespace idaten { void SVGFPathTracing::onGenPath( int sample, int maxSamples, int seed, cudaTextureObject_t texVtxPos, cudaTextureObject_t texVtxNml) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); bool isFillAOV = m_mode == Mode::AOVar; genPath << <grid, block, 0, m_stream >> > ( m_tileDomain, isFillAOV, m_paths.ptr(), m_rays.ptr(), m_tileDomain.w, m_tileDomain.h, sample, maxSamples, m_frame, m_cam.ptr(), m_sobolMatrices.ptr(), m_random.ptr()); checkCudaKernel(genPath); } void SVGFPathTracing::onHitTest( int width, int height, int bounce, cudaTextureObject_t texVtxPos) { if (bounce == 0 && m_canSSRTHitTest) { onScreenSpaceHitTest(width, height, bounce, texVtxPos); } else { #ifdef ENABLE_PERSISTENT_THREAD hitTest << <NUM_BLOCK, dim3(WARP_SIZE, NUM_WARP_PER_BLOCK), 0, m_stream >> > ( #else dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); hitTest << <grid, block >> > ( #endif //hitTest << <1, 1 >> > ( m_tileDomain, m_paths.ptr(), m_isects.ptr(), m_rays.ptr(), m_hitbools.ptr(), width, height, m_shapeparam.ptr(), m_shapeparam.num(), m_lightparam.ptr(), m_lightparam.num(), m_nodetex.ptr(), m_primparams.ptr(), texVtxPos, m_mtxparams.ptr(), bounce, m_hitDistLimit); checkCudaKernel(hitTest); } } void SVGFPathTracing::onShadeMiss( int width, int height, int bounce, int offsetX/*= -1*/, int offsetY/*= -1*/) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); offsetX = offsetX < 0 ? 
m_tileDomain.x : offsetX; offsetY = offsetY < 0 ? m_tileDomain.y : offsetY; if (m_envmapRsc.idx >= 0) { shadeMissWithEnvmap << <grid, block, 0, m_stream >> > ( m_tileDomain, offsetX, offsetY, bounce, m_cam.ptr(), m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_tex.ptr(), m_envmapRsc.idx, m_envmapRsc.avgIllum, m_envmapRsc.multiplyer, m_paths.ptr(), m_rays.ptr(), width, height); } else { shadeMiss << <grid, block, 0, m_stream >> > ( m_tileDomain, bounce, m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), m_paths.ptr(), width, height); } checkCudaKernel(shadeMiss); } void SVGFPathTracing::onShade( cudaSurfaceObject_t outputSurf, int width, int height, int bounce, int rrBounce, cudaTextureObject_t texVtxPos, cudaTextureObject_t texVtxNml) { m_mtxW2V.lookat( m_camParam.origin, m_camParam.center, m_camParam.up); m_mtxV2C.perspective( m_camParam.znear, m_camParam.zfar, m_camParam.vfov, m_camParam.aspect); m_mtxC2V = m_mtxV2C; m_mtxC2V.invert(); m_mtxV2W = m_mtxW2V; m_mtxV2W.invert(); aten::mat4 mtxW2C = m_mtxV2C * m_mtxW2V; dim3 blockPerGrid(((m_tileDomain.w * m_tileDomain.h) + 64 - 1) / 64); dim3 threadPerBlock(64); auto& hitcount = m_compaction.getCount(); int curaov = getCurAovs(); shade << <blockPerGrid, threadPerBlock, 0, m_stream >> > ( m_tileDomain, m_aovNormalDepth[curaov].ptr(), m_aovTexclrMeshid[curaov].ptr(), mtxW2C, width, height, m_paths.ptr(), m_hitidx.ptr(), hitcount.ptr(), m_isects.ptr(), m_rays.ptr(), m_frame, bounce, rrBounce, m_shapeparam.ptr(), m_shapeparam.num(), m_mtrlparam.ptr(), m_lightparam.ptr(), m_lightparam.num(), m_primparams.ptr(), texVtxPos, texVtxNml, m_mtxparams.ptr(), m_tex.ptr(), m_random.ptr(), m_shadowRays.ptr()); checkCudaKernel(shade); hitShadowRay << <blockPerGrid, threadPerBlock, 0, m_stream >> > ( bounce, m_paths.ptr(), m_hitidx.ptr(), hitcount.ptr(), m_shadowRays.ptr(), m_shapeparam.ptr(), m_shapeparam.num(), m_mtrlparam.ptr(), m_lightparam.ptr(), m_lightparam.num(), m_nodetex.ptr(), m_primparams.ptr(), texVtxPos, m_mtxparams.ptr()); checkCudaKernel(hitShadowRay); } void SVGFPathTracing::onGather( cudaSurfaceObject_t outputSurf, int width, int height, int maxSamples) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid( (m_tileDomain.w + block.x - 1) / block.x, (m_tileDomain.h + block.y - 1) / block.y); int curaov = getCurAovs(); gather << <grid, block, 0, m_stream >> > ( m_tileDomain, outputSurf, m_aovColorVariance[curaov].ptr(), m_aovMomentTemporalWeight[curaov].ptr(), m_paths.ptr(), m_tmpBuf.ptr(), width, height); checkCudaKernel(gather); } }
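The shade and shadeMissWithEnvmap kernels above weight BSDF samples and explicit light samples with the balance heuristic, converting pdfs between area and solid-angle measure first (the comments point to the edubpt slides, p31-p35). Restated in standard notation (symbols are mine: p_A is an area-measure pdf, p_omega the corresponding solid-angle pdf, d the distance to the sampled light point, theta_l the angle at the light, p_b the BSDF pdf):

\[
  p_\omega \;=\; p_A\,\frac{d^{2}}{\cos\theta_l}
  \quad\Longleftrightarrow\quad
  p_A \;=\; p_\omega\,\frac{\cos\theta_l}{d^{2}},
  \qquad
  w_{\text{light}} \;=\; \frac{p_{\text{light}}}{p_{\text{light}} + p_b},
  \qquad
  w_{\text{BSDF}} \;=\; \frac{p_b}{p_{\text{light}} + p_b}.
\]

This matches the two conversions in the kernel: the implicit light hit lifts the light's area pdf to solid angle (pdfLight * dist2 / cosLight) before weighting the BSDF sample, while the explicit connection drops the BSDF pdf to area measure (pdfb * cosLight / dist2) before weighting the light sample, so each weight compares the two pdfs in the same measure.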
004d50bbfeef3be24832c1e664f0eddaa1162911.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple_hip.cuh" #include <cudf/reduction/detail/reduction_functions.hpp> namespace cudf { namespace reduction { namespace detail { std::unique_ptr<cudf::column> segmented_all( column_view const& col, device_span<size_type const> offsets, cudf::data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8), "segmented_all() operation requires output type `BOOL8`"); using reducer = simple::detail::bool_result_column_dispatcher<op::min>; // A minimum over bool types is used to implement all() return cudf::type_dispatcher( col.type(), reducer{}, col, offsets, null_handling, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
004d50bbfeef3be24832c1e664f0eddaa1162911.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "simple.cuh" #include <cudf/reduction/detail/reduction_functions.hpp> namespace cudf { namespace reduction { namespace detail { std::unique_ptr<cudf::column> segmented_all( column_view const& col, device_span<size_type const> offsets, cudf::data_type const output_dtype, null_policy null_handling, std::optional<std::reference_wrapper<scalar const>> init, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(output_dtype == cudf::data_type(cudf::type_id::BOOL8), "segmented_all() operation requires output type `BOOL8`"); using reducer = simple::detail::bool_result_column_dispatcher<op::min>; // A minimum over bool types is used to implement all() return cudf::type_dispatcher( col.type(), reducer{}, col, offsets, null_handling, init, stream, mr); } } // namespace detail } // namespace reduction } // namespace cudf
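As the comment in both copies notes, all() over a boolean column reduces to a minimum, because a single false (0) annihilates its segment. Below is a hypothetical stand-alone CUDA sketch of a segmented min-reduction driven by an offsets array; it is illustrative only and not cudf's implementation (cudf dispatches to its internal reduction machinery and also honours null_policy, which is omitted here). Kernel and variable names are invented for the example.

#include <cstdio>

// One thread per segment; segment s covers values[offsets[s] .. offsets[s+1]).
__global__ void segmented_all_kernel(const unsigned char* values,
                                     const int* offsets,   // num_segments + 1 entries
                                     int num_segments,
                                     unsigned char* out)
{
    int seg = blockIdx.x * blockDim.x + threadIdx.x;
    if (seg >= num_segments) {
        return;
    }
    unsigned char acc = 1;  // identity of min over {0, 1}: an empty segment is "all true"
    for (int i = offsets[seg]; i < offsets[seg + 1]; ++i) {
        acc = values[i] < acc ? values[i] : acc;  // min over 0/1 == logical AND
    }
    out[seg] = acc;
}

int main()
{
    const int n = 6, num_segments = 2;
    unsigned char h_values[n] = {1, 1, 1, 1, 0, 1};   // segment 0: all true, segment 1: not
    int h_offsets[num_segments + 1] = {0, 3, 6};
    unsigned char *d_values, *d_out;
    int* d_offsets;
    cudaMalloc(&d_values, n);
    cudaMalloc(&d_offsets, sizeof(h_offsets));
    cudaMalloc(&d_out, num_segments);
    cudaMemcpy(d_values, h_values, n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);
    segmented_all_kernel<<<1, 32>>>(d_values, d_offsets, num_segments, d_out);
    unsigned char h_out[num_segments];
    cudaMemcpy(h_out, d_out, num_segments, cudaMemcpyDeviceToHost);
    printf("%d %d\n", h_out[0], h_out[1]);            // expected: 1 0
    cudaFree(d_values); cudaFree(d_offsets); cudaFree(d_out);
    return 0;
}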
c0f48c21a719b8f0a63a291e516b8f04de88c26e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "tsne.h" #include "matrix/CuSparseMatrix.cu" #include "../knn/knn.h" using namespace std; typedef DeviceDenseMatrix<double> Mat; __global__ void QKernel(Mat Q, Mat Y); __global__ void GKernel(Mat G, Mat P, Mat Q, double QNorm, Mat Y); void tsne(double* Y, int* landmarks, int newRows, int newCols, const double* data, const int* index, const int* row_ptr, int rows, int cols, int nnz, const int* belong, int classes, int perplexity, int max_itr, unsigned int seed) { printf("tsne..."); assert(newCols < cols && newRows >= classes); srand(seed); if (classes > 0) { vector<vector<int> *> staff(classes); for (int i = 0; i < classes; ++i) staff[i] = new vector<int>(); printf("here"); for (int i = 0; i < rows; ++i) staff[belong[i]]->push_back(i); printf("hi"); // make sure every class has at least one member in sample for (int i = 0; i < classes; ++i) if (staff[i]->size() > 0) { int reserved = rand() % staff[i]->size(); landmarks[i] = staff[i]->at(reserved); swap(staff[i]->at(reserved), *(staff[i]->end() - 1)); staff[i]->pop_back(); } printf("hi"); int i = 0; int j = -1; int k = classes; while (k < rows) { //advance j++; while (j >= staff[i]->size()) { i++; j = 0; } //Reservoir Sampling if (k < newRows) { landmarks[k] = staff[i]->at(j); } else { if (double(rand()) / RAND_MAX < double(newRows - classes) / (k - classes + 1)) { landmarks[classes + rand() % (newRows - classes)] = k; } } k++; } } else { int i = 0; while (i < rows) { //advance //Reservoir Sampling if (i < newRows) { landmarks[i] = i; } else { if (double(rand()) / RAND_MAX < double(newRows) / (i + 1 - classes + 1)) { landmarks[rand() % newRows] = i; } } i++; } } cout << "landmarks:" << endl; for (int i = 0; i < newRows; ++i) { cout << landmarks[i] << '\t'; } cout << endl; /* Loss Function: C = KL(P|Q) = \Sum_i\Sum_j Pij*log(Pij/Qij) P[N,M] : joint prob (Gussian) distribution of original data Q[N,K] : joint prob (Student t-) distribution of mapped data(K << M) Pij = Pji = (Pj|i + Pi|j)/2/N Pj|i = e^(-|Xi-Xj|^2/2/var_i^2) / Norm Qij = Qji = (1 + |Yi-Yj|^2)^-1 / Norm Pii = Qii = 0 Gradient Desending: Yt = Yt-1 + lr * grad + momentum * (Yt-1 - Yt-2) grad[C,Yi] = 4 * \Sum_j (Pij-Qij) * (Yi-Yj) * (1+|Yi-Yj|^2)^-1 */ vector<double> var(newRows); DistMatrix<double> D(newRows); // knn(D.data, data, index, row_ptr, rows, cols, nnz, landmarks, newRows); cout << "D" << endl; cout << D << endl; double eps = pow(2., -52.); double logPerplexity = log(perplexity); double tol = 1e-6; CDenseMatrix<double> P(newRows, newRows); double INF = 3e+300; vector<double> perp(newRows); for (int i = 0; i < newRows; ++i) { double l = -INF; double r = INF; //todo double mid = 1; for (int j = 0; j < 50; ++j) { // printf("var_i %e\n", var_i); double Norm = 0; perp[i] = 0; for (int j = 0; j < newRows; ++j) { if (j == i) continue; P.at(i, j) = exp(-D.at(i, j) * D.at(i, j) * mid); //todo Norm += P.at(i, j); perp[i] += D.at(i, j) * P.at(i, j); } perp[i] = log(Norm) + mid * perp[i] / Norm; // if (perp[i] != perp[i]) { // cout << "hi " << Norm << '\t' << mid << '\t' << perp[i] << endl; // for (int j = 0; j < newRows; ++j) { // cout << D.at(i, j) << endl; // } // return; // } for (int j = 0; j < newRows; ++j) P.at(i, j) /= Norm; // printf("%d perp %e mid %e l %e r %e\n", i, exp(perp), mid, l, r); double diff = perp[i] - logPerplexity; if (abs(diff) < tol) break; if (diff < 0 || perp[i] != perp[i] /* Norm ~= 0 */ ) { r = mid; mid = l == -INF ? 
r / 2 : (l + r) / 2; } else { l = mid; mid = r == INF ? l * 2 : (l + r) / 2; } } printf("perp_%d %e precision %e var %e\n", i, exp(perp[i]), mid, sqrt(1. / 2 / mid)); } double sumP = 0.; for (int i = 0; i < newRows; ++i) { P.at(i, i) = 0.; for (int j = i + 1; j < newRows; ++j) { P.at(i, j) = P.at(j, i) = (P.at(i, j) + P.at(j, i)) * 0.5; sumP += P.at(i, j) * 2; } } for (int i = 0; i < newRows; ++i) { for (int j = 0; j < newRows; ++j) P.at(i, j) = max(P.at(i, j) / sumP, eps); } cout << "P" << endl; cout << P << endl; Mat d_P(newRows, newRows); d_P = P; d_P *= 12.; double init_mean = 0.; double init_var = 1e-4; //Box-Muller Transform for (int i = 0; i < newRows * newCols; i += 2) { double u1 = (double)rand() / RAND_MAX; double u2 = (double)rand() / RAND_MAX; Y[i] = sqrt(-2. * log(u1)) * cos(2. * 3.14 * u2) * init_var + init_mean; if (i + 1 < newRows * newCols) Y[i + 1] = sqrt(-2. * log(u1)) * sin(2. * 3.14 * u2) * init_var + init_mean; } CDenseMatrix<double> h_Y(Y, newRows, newCols); Mat* d_Y[3]; for (int i = 0; i < 3; ++i) { d_Y[i] = new Mat(newRows, newCols); *d_Y[i] = h_Y; } Mat d_Q(newRows, newRows); Mat d_G(newRows, newCols); double lr = 0.1; double momentum = 0.5; int t = 2; Mat d_gains(newRows, newCols); d_gains = 1.; Mat d_incs(newRows, newCols); d_incs = 0.; double min_gain = 0.01; double epsilon = 200; printf("Iteration\tCost(KL-devergence)\n"); for (int itr = 0; itr < max_itr; ++itr) { int threads1 = 16 * 16; int blocks1 = (newRows + threads1 - 1) / threads1; hipLaunchKernelGGL(( QKernel), dim3(blocks1), dim3(threads1), 0, 0, d_Q, *d_Y[t]); checkCudaErrors(hipDeviceSynchronize()); double QNorm = sum(d_Q); d_Q /= QNorm; d_Q = maximum(d_Q, eps); int threads2 = 16 * 16; int blocks2 = (newRows + threads2 - 1) / threads2; hipLaunchKernelGGL(( GKernel), dim3(blocks2), dim3(threads2), 0, 0, d_G, d_P, d_Q, QNorm, *d_Y[t]); checkCudaErrors(hipDeviceSynchronize()); // *d_Y[t] = *d_Y[(t - 1 + 3) % 3] - d_G * lr // + (*d_Y[(t - 1 + 3) % 3] - *d_Y[(t - 2 + 3) % 3]) * momentum; // if (itr < max_itr - 1) t = (t - 2 + 3) % 3; d_gains = (d_gains + 0.2) * (sign(d_G) != sign(d_incs)) + d_gains * 0.8 * (sign(d_G) == sign(d_incs)); d_gains = maximum(d_gains, min_gain); d_incs = d_incs * momentum - d_gains * d_G * epsilon; *d_Y[t] += d_incs; if (itr % 10 == 0) { d_Q = d_P * (log(d_P) - log(d_Q)); double cost = sum(d_Q); printf("%d\t%.30f\n", itr, cost); } if (itr > 250) { momentum = 0.8; } if (itr == 100) { d_P /= 12.; } } (*d_Y[t]).toHost(Y); } __global__ void QKernel(Mat Q, Mat Y) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int i = tid; for (int j = 0; j < Q.cols; ++j) { double d = 0; for (int k = 0; k < Y.cols; ++k) d += (Y.at(i, k) -Y.at(j, k)) * (Y.at(i, k) -Y.at(j, k)); Q.at(i, j) = 1. / (1. + d); } Q.at(i, i) = 0.; } //grad[C,Yi] = 4 * \Sum_j (Pij-Qij) * (Yi-Yj) * (1+|Yi-Yj|^2)^-1 __global__ void GKernel(Mat G, Mat P, Mat Q, double QNorm, Mat Y) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int i = tid; for (int k = 0; k < G.cols; ++k) { double g = 0; for (int j = 0; j < G.rows; ++j) g += (P.at(i, j) - Q.at(i, j)) * (Y.at(i, k) - Y.at(j, k)) * Q.at(i, j) * QNorm; G.at(i, k) = g * 4.; } }
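The Y-initialisation loop in both copies of this file draws the starting embedding from a Gaussian via the Box-Muller transform (the literal 3.14 approximates pi). With u_1, u_2 drawn uniformly from (0, 1]:

\[
  z_0 \;=\; \sqrt{-2\ln u_1}\,\cos(2\pi u_2),
  \qquad
  z_1 \;=\; \sqrt{-2\ln u_1}\,\sin(2\pi u_2),
\]

which yields two independent standard-normal samples per pair of uniforms; the code then scales them by init_var and shifts them by init_mean.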
c0f48c21a719b8f0a63a291e516b8f04de88c26e.cu
#include "tsne.h" #include "matrix/CuSparseMatrix.cu" #include "../knn/knn.h" using namespace std; typedef DeviceDenseMatrix<double> Mat; __global__ void QKernel(Mat Q, Mat Y); __global__ void GKernel(Mat G, Mat P, Mat Q, double QNorm, Mat Y); void tsne(double* Y, int* landmarks, int newRows, int newCols, const double* data, const int* index, const int* row_ptr, int rows, int cols, int nnz, const int* belong, int classes, int perplexity, int max_itr, unsigned int seed) { printf("tsne..."); assert(newCols < cols && newRows >= classes); srand(seed); if (classes > 0) { vector<vector<int> *> staff(classes); for (int i = 0; i < classes; ++i) staff[i] = new vector<int>(); printf("here"); for (int i = 0; i < rows; ++i) staff[belong[i]]->push_back(i); printf("hi"); // make sure every class has at least one member in sample for (int i = 0; i < classes; ++i) if (staff[i]->size() > 0) { int reserved = rand() % staff[i]->size(); landmarks[i] = staff[i]->at(reserved); swap(staff[i]->at(reserved), *(staff[i]->end() - 1)); staff[i]->pop_back(); } printf("hi"); int i = 0; int j = -1; int k = classes; while (k < rows) { //advance j++; while (j >= staff[i]->size()) { i++; j = 0; } //Reservoir Sampling if (k < newRows) { landmarks[k] = staff[i]->at(j); } else { if (double(rand()) / RAND_MAX < double(newRows - classes) / (k - classes + 1)) { landmarks[classes + rand() % (newRows - classes)] = k; } } k++; } } else { int i = 0; while (i < rows) { //advance //Reservoir Sampling if (i < newRows) { landmarks[i] = i; } else { if (double(rand()) / RAND_MAX < double(newRows) / (i + 1 - classes + 1)) { landmarks[rand() % newRows] = i; } } i++; } } cout << "landmarks:" << endl; for (int i = 0; i < newRows; ++i) { cout << landmarks[i] << '\t'; } cout << endl; /* Loss Function: C = KL(P|Q) = \Sum_i\Sum_j Pij*log(Pij/Qij) P[N,M] : joint prob (Gussian) distribution of original data Q[N,K] : joint prob (Student t-) distribution of mapped data(K << M) Pij = Pji = (Pj|i + Pi|j)/2/N Pj|i = e^(-|Xi-Xj|^2/2/var_i^2) / Norm Qij = Qji = (1 + |Yi-Yj|^2)^-1 / Norm Pii = Qii = 0 Gradient Desending: Yt = Yt-1 + lr * grad + momentum * (Yt-1 - Yt-2) grad[C,Yi] = 4 * \Sum_j (Pij-Qij) * (Yi-Yj) * (1+|Yi-Yj|^2)^-1 */ vector<double> var(newRows); DistMatrix<double> D(newRows); // knn(D.data, data, index, row_ptr, rows, cols, nnz, landmarks, newRows); cout << "D" << endl; cout << D << endl; double eps = pow(2., -52.); double logPerplexity = log(perplexity); double tol = 1e-6; CDenseMatrix<double> P(newRows, newRows); double INF = 3e+300; vector<double> perp(newRows); for (int i = 0; i < newRows; ++i) { double l = -INF; double r = INF; //todo double mid = 1; for (int j = 0; j < 50; ++j) { // printf("var_i %e\n", var_i); double Norm = 0; perp[i] = 0; for (int j = 0; j < newRows; ++j) { if (j == i) continue; P.at(i, j) = exp(-D.at(i, j) * D.at(i, j) * mid); //todo Norm += P.at(i, j); perp[i] += D.at(i, j) * P.at(i, j); } perp[i] = log(Norm) + mid * perp[i] / Norm; // if (perp[i] != perp[i]) { // cout << "hi " << Norm << '\t' << mid << '\t' << perp[i] << endl; // for (int j = 0; j < newRows; ++j) { // cout << D.at(i, j) << endl; // } // return; // } for (int j = 0; j < newRows; ++j) P.at(i, j) /= Norm; // printf("%d perp %e mid %e l %e r %e\n", i, exp(perp), mid, l, r); double diff = perp[i] - logPerplexity; if (abs(diff) < tol) break; if (diff < 0 || perp[i] != perp[i] /* Norm ~= 0 */ ) { r = mid; mid = l == -INF ? r / 2 : (l + r) / 2; } else { l = mid; mid = r == INF ? 
l * 2 : (l + r) / 2; } } printf("perp_%d %e precision %e var %e\n", i, exp(perp[i]), mid, sqrt(1. / 2 / mid)); } double sumP = 0.; for (int i = 0; i < newRows; ++i) { P.at(i, i) = 0.; for (int j = i + 1; j < newRows; ++j) { P.at(i, j) = P.at(j, i) = (P.at(i, j) + P.at(j, i)) * 0.5; sumP += P.at(i, j) * 2; } } for (int i = 0; i < newRows; ++i) { for (int j = 0; j < newRows; ++j) P.at(i, j) = max(P.at(i, j) / sumP, eps); } cout << "P" << endl; cout << P << endl; Mat d_P(newRows, newRows); d_P = P; d_P *= 12.; double init_mean = 0.; double init_var = 1e-4; //Box-Muller Transform for (int i = 0; i < newRows * newCols; i += 2) { double u1 = (double)rand() / RAND_MAX; double u2 = (double)rand() / RAND_MAX; Y[i] = sqrt(-2. * log(u1)) * cos(2. * 3.14 * u2) * init_var + init_mean; if (i + 1 < newRows * newCols) Y[i + 1] = sqrt(-2. * log(u1)) * sin(2. * 3.14 * u2) * init_var + init_mean; } CDenseMatrix<double> h_Y(Y, newRows, newCols); Mat* d_Y[3]; for (int i = 0; i < 3; ++i) { d_Y[i] = new Mat(newRows, newCols); *d_Y[i] = h_Y; } Mat d_Q(newRows, newRows); Mat d_G(newRows, newCols); double lr = 0.1; double momentum = 0.5; int t = 2; Mat d_gains(newRows, newCols); d_gains = 1.; Mat d_incs(newRows, newCols); d_incs = 0.; double min_gain = 0.01; double epsilon = 200; printf("Iteration\tCost(KL-devergence)\n"); for (int itr = 0; itr < max_itr; ++itr) { int threads1 = 16 * 16; int blocks1 = (newRows + threads1 - 1) / threads1; QKernel<<<blocks1, threads1>>>(d_Q, *d_Y[t]); checkCudaErrors(cudaDeviceSynchronize()); double QNorm = sum(d_Q); d_Q /= QNorm; d_Q = maximum(d_Q, eps); int threads2 = 16 * 16; int blocks2 = (newRows + threads2 - 1) / threads2; GKernel<<<blocks2, threads2>>>(d_G, d_P, d_Q, QNorm, *d_Y[t]); checkCudaErrors(cudaDeviceSynchronize()); // *d_Y[t] = *d_Y[(t - 1 + 3) % 3] - d_G * lr // + (*d_Y[(t - 1 + 3) % 3] - *d_Y[(t - 2 + 3) % 3]) * momentum; // if (itr < max_itr - 1) t = (t - 2 + 3) % 3; d_gains = (d_gains + 0.2) * (sign(d_G) != sign(d_incs)) + d_gains * 0.8 * (sign(d_G) == sign(d_incs)); d_gains = maximum(d_gains, min_gain); d_incs = d_incs * momentum - d_gains * d_G * epsilon; *d_Y[t] += d_incs; if (itr % 10 == 0) { d_Q = d_P * (log(d_P) - log(d_Q)); double cost = sum(d_Q); printf("%d\t%.30f\n", itr, cost); } if (itr > 250) { momentum = 0.8; } if (itr == 100) { d_P /= 12.; } } (*d_Y[t]).toHost(Y); } __global__ void QKernel(Mat Q, Mat Y) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int i = tid; for (int j = 0; j < Q.cols; ++j) { double d = 0; for (int k = 0; k < Y.cols; ++k) d += (Y.at(i, k) -Y.at(j, k)) * (Y.at(i, k) -Y.at(j, k)); Q.at(i, j) = 1. / (1. + d); } Q.at(i, i) = 0.; } //grad[C,Yi] = 4 * \Sum_j (Pij-Qij) * (Yi-Yj) * (1+|Yi-Yj|^2)^-1 __global__ void GKernel(Mat G, Mat P, Mat Q, double QNorm, Mat Y) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int i = tid; for (int k = 0; k < G.cols; ++k) { double g = 0; for (int j = 0; j < G.rows; ++j) g += (P.at(i, j) - Q.at(i, j)) * (Y.at(i, k) - Y.at(j, k)) * Q.at(i, j) * QNorm; G.at(i, k) = g * 4.; } }
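Restating the loss and gradient from the comment block near the top of this file in standard notation, this is what QKernel and GKernel evaluate (GKernel multiplies the normalised q_ij back by QNorm to recover the unnormalised Student-t kernel, i.e. the (1 + |y_i - y_j|^2)^{-1} factor):

\[
  C \;=\; \mathrm{KL}(P\,\|\,Q) \;=\; \sum_i \sum_j p_{ij}\,\log\frac{p_{ij}}{q_{ij}},
  \qquad
  \frac{\partial C}{\partial y_i}
  \;=\; 4\sum_j \bigl(p_{ij} - q_{ij}\bigr)\bigl(y_i - y_j\bigr)\bigl(1 + \lVert y_i - y_j\rVert^{2}\bigr)^{-1}.
\]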
dd214c97705e5b45f7ad290ae486c172aee029ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "common_magma.h" //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University __global__ void zgeellrtmv_kernel_32( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University __global__ void zgeellrtmv_kernel_16( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. 
Garzn, Almeria University __global__ void zgeellrtmv_kernel_8( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha magmaDoubleComplex scalar alpha @param[in] dval magmaDoubleComplex_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar beta @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = ( (m+blocksize-1)/blocksize); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment) *alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (double) num_blocks ); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaDoubleComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { hipLaunchKernelGGL(( zgeellrtmv_kernel_32), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { hipLaunchKernelGGL(( zgeellrtmv_kernel_16), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { hipLaunchKernelGGL(( zgeellrtmv_kernel_8), dim3(grid), dim3(threads) , Ms, queue , m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
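The three zgeellrtmv kernels above assign T threads to each row, let every thread accumulate a strided partial dot product, and then fold the T partials together in shared memory (shared[idb] += shared[idb+16], then +8, +4, +2), apparently relying on warp-synchronous execution since there is no synchronisation between the folding steps and T <= 32. A generic stand-alone sketch of the same tree-reduction pattern, written with explicit __syncthreads() and assuming blockDim.x is a power of two; this is an illustration of the pattern, not MAGMA code.

#include <cstdio>

__global__ void treeReduceSum(const double* in, double* out, int n)
{
    extern __shared__ double sdata[];
    int tid = threadIdx.x;
    int i   = blockIdx.x * blockDim.x + threadIdx.x;

    sdata[tid] = (i < n) ? in[i] : 0.0;  // one partial value per thread
    __syncthreads();

    // Repeatedly fold the upper half of the partials onto the lower half.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) {
        out[blockIdx.x] = sdata[0];      // block-wide sum
    }
}

int main()
{
    const int n = 256;
    double h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = 1.0;
    double *d_in, *d_out;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemcpy(d_in, h_in, n * sizeof(double), cudaMemcpyHostToDevice);
    treeReduceSum<<<1, n, n * sizeof(double)>>>(d_in, d_out, n);
    double h_out = 0.0;
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);         // expected: 256.0
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}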
dd214c97705e5b45f7ad290ae486c172aee029ff.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @precisions normal z -> c d s */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void zgeellrtmv_kernel_32( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void zgeellrtmv_kernel_16( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. 
Garzón, Almeria University __global__ void zgeellrtmv_kernel_8( int num_rows, int num_cols, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magma_index_t * drowlength, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaDoubleComplex shared[]; if(i < num_rows ){ magmaDoubleComplex dot = MAGMA_Z_MAKE(0.0, 0.0); int max_ = (drowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaDoubleComplex val = dval[ k*(T*alignment)+(i*T)+idp ]; //int col = dcolind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaDoubleComplex val = dval[ k*(T)+(i*alignment)+idp ]; int col = dcolind [ k*(T)+(i*alignment)+idp ]; dot += val * dx[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { dy[i] = (shared[idb]+shared[idb+1])*alpha + beta*dy [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows @param[in] n magma_int_t number of columns @param[in] nnz_per_row magma_int_t max number of nonzeros in a row @param[in] alpha magmaDoubleComplex scalar alpha @param[in] dval magmaDoubleComplex_ptr val array @param[in] dcolind magmaIndex_ptr col indices @param[in] drowlength magmaIndex_ptr number of elements in each row @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar beta @param[out] dy magmaDoubleComplex_ptr output vector y @param[in] blocksize magma_int_t threads per block @param[in] alignment magma_int_t threads assigned to each row @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowlength, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_int_t alignment, magma_int_t blocksize, magma_queue_t queue ) { int num_blocks = ( (m+blocksize-1)/blocksize); magma_int_t num_threads = alignment*blocksize; magma_int_t threads = alignment*blocksize; int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment) *alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = (int) sqrt( (double) num_blocks ); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaDoubleComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if ( alignment == 32 ) { zgeellrtmv_kernel_32<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 16 ) { zgeellrtmv_kernel_16<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else if ( alignment == 8 ) { zgeellrtmv_kernel_8<<< grid, threads , Ms, queue >>> ( m, n, alpha, dval, dcolind, drowlength, dx, beta, dy, alignment, real_row_length ); } else { printf("error: alignment %d not supported.\n", alignment); return MAGMA_ERR_NOT_SUPPORTED; } return MAGMA_SUCCESS; }
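A small host-only illustration (with made-up sizes) of two pieces of arithmetic in the magma_zgeellrtmv wrapper above: padding the per-row storage length up to the next multiple of alignment, and splitting a 1-D block count into a roughly square 2-D grid via an integer square root, as the wrapper does with dimgrid1 and dimgrid2.

#include <cmath>
#include <cstdio>

int main()
{
    int nnz_per_row = 37, alignment = 16;
    // Round nnz_per_row up to the next multiple of alignment (37 -> 48).
    int real_row_length = ((nnz_per_row + alignment - 1) / alignment) * alignment;

    int num_blocks = 1000;
    // Factor a 1-D block count into a roughly square 2-D grid.
    int dimgrid1 = (int)std::sqrt((double)num_blocks);      // 31
    int dimgrid2 = (num_blocks + dimgrid1 - 1) / dimgrid1;  // 33, so 31 * 33 >= 1000
    std::printf("%d %d %d\n", real_row_length, dimgrid1, dimgrid2);
    return 0;
}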
e4e3039cb9ab8fd0297c16bbd361b5219976422f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void initializeVolume (int3 voxels_size,PtrStep<T> volume) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < voxels_size.x && y < voxels_size.y) { T *pos = volume.ptr(y) + x; int z_step = voxels_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < voxels_size.z; ++z, pos+=z_step) pack_tsdf (0.f, 0, *pos); } } template<typename T> __global__ void clearSphereKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 center,float radius,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if (norm(pt - center) < radius) { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearBBoxKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 m,float3 M,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = 
volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) && (pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z)) { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearCylinderKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 cylinder_center,float3 height_bearing, float radius,float half_height,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); // project the point onto the cylinder height segment float3 projected_pt = cylinder_center - height_bearing * dot(cylinder_center - pt,height_bearing); if (norm(cylinder_center - projected_pt) < half_height && // check in height segment norm(projected_pt - pt) < radius) // check in radius { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x < minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y < minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX < maxBounds.x) || (idY >= minBounds.y && idY < maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z) { T *pos = volume.ptr(y + z * buffer.voxels_size.y) + x; pack_tsdf (0.f, 0, *pos); } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only appropriate Z int idZ = minBounds.z; if (maxBounds.z < 0) idZ += maxBounds.z; if (idZ < 0) idZ += buffer.voxels_size.z; int nbSteps = abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z) { ///If we went outside of the memory, make sure we go back to the begining of it if(idZ + z >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; T *pos = volume.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; pack_tsdf (0.f, 0, *pos); } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // clearSliceKernel void initVolume (int3 voxels_size,PtrStep<short2> volume) { dim3 block (16, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); hipLaunchKernelGGL(( initializeVolume), dim3(grid), dim3(block), 0, 0, voxels_size,volume); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } void clearSphere(PtrStep<short2> volume,const int3 voxels_size,int3 tsdf_origin,float3 center,float radius, const bool set_to_empty) { 
dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); hipLaunchKernelGGL(( clearSphereKernel), dim3(grid), dim3(block), 0, 0, volume,voxels_size,tsdf_origin,center,radius,set_to_empty); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } void clearBBox(PtrStep<short2> volume, const int3 voxels_size, const int3& origin, const float3& m, const float3& M, const bool set_to_empty) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); hipLaunchKernelGGL(( clearBBoxKernel), dim3(grid), dim3(block), 0, 0, volume,voxels_size,origin,m,M,set_to_empty); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } void clearCylinder(PtrStep<short2> volume, const int3 voxels_size, const int3& origin, float3 cylinder_center, float3 height_bearing, float radius, float half_height, bool set_to_empty) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); hipLaunchKernelGGL(( clearCylinderKernel), dim3(grid), dim3(block), 0, 0, volume,voxels_size,origin,cylinder_center,height_bearing, radius,half_height,set_to_empty); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { struct Tsdf { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, MAX_WEIGHT = 1 << 7 }; mutable PtrStep<short2> volume; float3 cell_size; Intr intr; Mat33 Rcurr_inv; float3 tcurr; PtrStepSz<ushort> depth_raw; //depth in mm float tranc_dist_mm; __device__ __forceinline__ float3 getVoxelGCoo (int x, int y, int z) const { float3 coo = make_float3 (x, y, z); coo += 0.5f; //shift to cell center; coo.x *= cell_size.x; coo.y *= cell_size.y; coo.z *= cell_size.z; return coo; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(*pos); for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step) { float3 v_g = getVoxelGCoo (x, y, z); //3 // p //tranform to curr cam coo space float3 v = Rcurr_inv * (v_g - tcurr); //4 int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm sdf *= (-1); if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } } }; template<typename T> __global__ void uploadKnownToTSDFSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds, PtrStep<short> known_status) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x < 
minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y < minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX < maxBounds.x) || (idY >= minBounds.y && idY < maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z) { T *pos = volume.ptr(y + z * buffer.voxels_size.y) + x; short * ks = known_status.ptr(y + z * buffer.voxels_size.y) + x; const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only appropriate Z int idZ = minBounds.z; if (maxBounds.z < 0) idZ += maxBounds.z; if (idZ < 0) idZ += buffer.voxels_size.z; int nbSteps = abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z) { ///If we went outside of the memory, make sure we go back to the begining of it if(idZ + z >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; T *pos = volume.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; short * ks = known_status.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // uploadKnownToTSDFSliceKernel __global__ void integrateTsdfKernel (const Tsdf tsdf) { tsdf (); } __global__ void tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr, const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z; float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z; float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z; //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z) { float3 vr; vr.x = v_g_x; vr.y = v_g_y; vr.z = (v_g_z + z * cell_size.z); float3 v; v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z; v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z; v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z; int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) 
/ (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } pos += elem_step; } /* for(int z = 0; z < VOLUME_Z; ++z) */ } /* __global__ */ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume) { Tsdf tsdf; tsdf.volume = volume; tsdf.cell_size.x = volume_size.x / VOLUME_X; tsdf.cell_size.y = volume_size.y / VOLUME_Y; tsdf.cell_size.z = volume_size.z / VOLUME_Z; tsdf.intr = intr; tsdf.Rcurr_inv = Rcurr_inv; tsdf.tcurr = tcurr; tsdf.depth_raw = depth_raw; tsdf.tranc_dist_mm = tranc_dist*1000; //mm dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); #if 0 //tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size); hipLaunchKernelGGL(( integrateTsdfKernel), dim3(grid), dim3(block), 0, 0, tsdf); #endif cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; int Dp = depth.ptr (y)[x]; float xl = (x - intr.cx) / intr.fx; float yl = (y - intr.cy) / intr.fy; float lambda = sqrtf (xl * xl + yl * yl + 1); scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters } __global__ void tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= buffer.voxels_size.x - buffer.voxels_volume_padding.x || y >= buffer.voxels_size.y - buffer.voxels_volume_padding.y) return; if (x < buffer.voxels_volume_padding.x || y < buffer.voxels_volume_padding.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; int idX = x + buffer.origin_GRID.x; if (idX >= buffer.voxels_size.x) idX -= buffer.voxels_size.x; int idY = y + buffer.origin_GRID.y; if (idY >= buffer.voxels_size.y) idY -= buffer.voxels_size.y; //#pragma unroll for (int z = buffer.voxels_volume_padding.z; z < buffer.voxels_size.z - buffer.voxels_volume_padding.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory int 
idZ = z + buffer.origin_GRID.z; if (idZ >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; short2* pos = volume.ptr (buffer.voxels_size.y * idZ + idY) + idX; float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23_only_empty (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= buffer.voxels_size.x - buffer.voxels_volume_padding.x || y >= buffer.voxels_size.y - buffer.voxels_volume_padding.y) return; if (x < buffer.voxels_volume_padding.x || y < buffer.voxels_volume_padding.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; int idX = x + buffer.origin_GRID.x; if (idX >= buffer.voxels_size.x) idX -= buffer.voxels_size.x; int idY = y + buffer.origin_GRID.y; if (idY >= buffer.voxels_size.y) idY -= buffer.voxels_size.y; //#pragma unroll for (int z = buffer.voxels_volume_padding.z; z < buffer.voxels_size.z - buffer.voxels_volume_padding.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory int idZ = z + buffer.origin_GRID.z; if (idZ >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; short2* pos = volume.ptr (buffer.voxels_size.y * idZ + idY) + idX; float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= tranc_dist) //meters { float tsdf = 1.0f; //read and unpack float tsdf_prev; int weight_prev; 
unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); bool integrate = true; if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2)) { const float qnan = numeric_limits<float>::quiet_NaN(); float3 normal = make_float3(qnan, qnan, qnan); float Fn, Fp; int Wn = 0, Wp = 0; unpack_tsdf (*(pos + elem_step), Fn, Wn); unpack_tsdf (*(pos - elem_step), Fp, Wp); if (Wn > 16 && Wp > 16) normal.z = (Fn - Fp)/cell_size.z; unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn); unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp); if (Wn > 16 && Wp > 16) normal.y = (Fn - Fp)/cell_size.y; unpack_tsdf (*(pos + 1), Fn, Wn); unpack_tsdf (*(pos - 1), Fp, Wp); if (Wn > 16 && Wp > 16) normal.x = (Fn - Fp)/cell_size.x; if (normal.x != qnan && normal.y != qnan && normal.z != qnan) { float norm2 = dot(normal, normal); if (norm2 >= 1e-10) { normal *= rsqrt(norm2); float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z; float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z); if (cosine < 0.5) integrate = false; } } } if (integrate) { //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } // for(int z = 0; z < 
VOLUME_Z; ++z) } // __global__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. hipLaunchKernelGGL(( scaleDepth), dim3(grid_scale), dim3(block_scale), 0, 0, depth, depthScaled, intr); cudaSafeCall ( hipGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); hipLaunchKernelGGL(( tsdf23), dim3(grid), dim3(block), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolumeOnlyEmpty (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. 
hipLaunchKernelGGL(( scaleDepth), dim3(grid_scale), dim3(block_scale), 0, 0, depth, depthScaled, intr); cudaSafeCall ( hipGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); hipLaunchKernelGGL(( tsdf23_only_empty), dim3(grid), dim3(block), 0, 0, depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ) { int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int3 minBounds, maxBounds; //X if(newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); //Z minBounds.z = buffer->origin_GRID.z; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp (buffer->voxels_size.y, block.y); hipLaunchKernelGGL(( clearSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void uploadKnownToTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ, PtrStep<short> known_status) { int oldX = buffer->origin_GRID.x - shiftX; int oldY = buffer->origin_GRID.y - shiftY; int oldZ = buffer->origin_GRID.z - shiftZ; int3 minBounds, maxBounds; //X if(oldX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = oldX; } else { minBounds.x = oldX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(oldY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = oldY; } else { minBounds.y = oldY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); while (oldZ < 0) oldZ += buffer->voxels_size.z; while (oldZ >= buffer->voxels_size.z) oldZ -= buffer->voxels_size.z; //Z minBounds.z = oldZ; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp (buffer->voxels_size.y, block.y); hipLaunchKernelGGL(( uploadKnownToTSDFSliceKernel), dim3(grid), dim3(block), 0, 0, volume, *buffer, minBounds, maxBounds, known_status); cudaSafeCall ( hipGetLastError () ); cudaSafeCall 
(hipDeviceSynchronize ()); } template<typename T> __global__ void uploadKnownToBBoxKernel(PtrStep<T> volume,int3 volume_size,int3 shift,int3 m,int3 M, PtrStep<short> known_status) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int3 known_status_size; known_status_size.x = M.x - m.x; known_status_size.y = M.y - m.y; known_status_size.z = M.z - m.z; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) && (pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z)) { short * ks = known_status.ptr((y - m.y) + (z - m.z) * known_status_size.y) + (x - m.x); const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } } } void uploadKnownToBBox (PtrStep<short2> volume, const int3 voxels_size,const int3& origin, const int3& m,const int3& M, PtrStep<short> known_status) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); hipLaunchKernelGGL(( uploadKnownToBBoxKernel), dim3(grid), dim3(block), 0, 0, volume,voxels_size,origin,m,M,known_status); cudaSafeCall ( hipGetLastError () ); cudaSafeCall (hipDeviceSynchronize ()); } } } }
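The .hip/.cu pair in this record makes the hipify translation concrete: triple-chevron launches become hipLaunchKernelGGL calls with explicit shared-memory and stream arguments, the cuda* runtime calls inside the cudaSafeCall wrappers map one-to-one onto their hip* counterparts, and the kernel bodies themselves are left untouched. Below is a minimal, self-contained sketch of that mapping; the kernel and buffer names (toyScale, d_data) are illustrative only and do not appear in the files of this record.

// Minimal sketch of the launch-syntax translation hipify applies to this file pair.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void toyScale (float *data, int n, float s)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n)
    data[i] *= s;
}

int main ()
{
  const int n = 1 << 20;
  float *d_data = nullptr;
  hipMalloc (&d_data, n * sizeof (float));

  dim3 block (256);
  dim3 grid ((n + block.x - 1) / block.x);

  // CUDA source:   toyScale<<<grid, block>>>(d_data, n, 2.f);
  // hipify output: shared-memory size and stream become explicit 0 arguments.
  hipLaunchKernelGGL (toyScale, grid, block, 0, 0, d_data, n, 2.f);

  // cudaGetLastError / cudaDeviceSynchronize map 1:1 onto the hip* equivalents,
  // matching the cudaSafeCall pattern kept verbatim in the file above.
  if (hipGetLastError () != hipSuccess || hipDeviceSynchronize () != hipSuccess)
    fprintf (stderr, "kernel launch failed\n");

  hipFree (d_data);
  return 0;
}

Compiling the sketch with hipcc reproduces the launch-then-check pattern used throughout the file above; on an NVIDIA backend the hip* calls resolve back to the CUDA runtime, which is why the two halves of this record stay line-for-line parallel.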
e4e3039cb9ab8fd0297c16bbd361b5219976422f.cu
/* * Software License Agreement (BSD License) * * Point Cloud Library (PCL) - www.pointclouds.org * Copyright (c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include "device.hpp" //#include <boost/graph/buffer_concepts.hpp> namespace pcl { namespace device { namespace kinfuLS { template<typename T> __global__ void initializeVolume (int3 voxels_size,PtrStep<T> volume) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < voxels_size.x && y < voxels_size.y) { T *pos = volume.ptr(y) + x; int z_step = voxels_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < voxels_size.z; ++z, pos+=z_step) pack_tsdf (0.f, 0, *pos); } } template<typename T> __global__ void clearSphereKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 center,float radius,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if (norm(pt - center) < radius) { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearBBoxKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 m,float3 M,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int 
z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) && (pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z)) { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearCylinderKernel(PtrStep<T> volume,int3 volume_size,int3 shift,float3 cylinder_center,float3 height_bearing, float radius,float half_height,bool set_to_empty) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; int z_step = volume_size.y * volume.step / sizeof(*pos); #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); // project the point onto the cylinder height segment float3 projected_pt = cylinder_center - height_bearing * dot(cylinder_center - pt,height_bearing); if (norm(cylinder_center - projected_pt) < half_height && // check in height segment norm(projected_pt - pt) < radius) // check in radius { if (set_to_empty) pack_tsdf(1.0f, 1, *(pos + (az * z_step))); else pack_tsdf(0.f, 0, *(pos + (az * z_step))); } } } } template<typename T> __global__ void clearSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x < minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y < minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX < maxBounds.x) || (idY >= minBounds.y && idY < maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z) { T *pos = volume.ptr(y + z * buffer.voxels_size.y) + x; pack_tsdf (0.f, 0, *pos); } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only appropriate Z int idZ = minBounds.z; if (maxBounds.z < 0) idZ += maxBounds.z; if (idZ < 0) idZ += buffer.voxels_size.z; int nbSteps = abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z) { ///If we went outside of the memory, make sure we go back to the begining of it if(idZ + z >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; T *pos = volume.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; pack_tsdf (0.f, 0, *pos); } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // clearSliceKernel void initVolume (int3 voxels_size,PtrStep<short2> volume) { dim3 block (16, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); initializeVolume<<<grid, block>>>(voxels_size,volume); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } void clearSphere(PtrStep<short2> volume,const int3 voxels_size,int3 tsdf_origin,float3 center,float radius, const bool set_to_empty) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); 
clearSphereKernel<<<grid, block>>>(volume,voxels_size,tsdf_origin,center,radius,set_to_empty); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } void clearBBox(PtrStep<short2> volume, const int3 voxels_size, const int3& origin, const float3& m, const float3& M, const bool set_to_empty) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); clearBBoxKernel<<<grid, block>>>(volume,voxels_size,origin,m,M,set_to_empty); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } void clearCylinder(PtrStep<short2> volume, const int3 voxels_size, const int3& origin, float3 cylinder_center, float3 height_bearing, float radius, float half_height, bool set_to_empty) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); clearCylinderKernel<<<grid, block>>>(volume,voxels_size,origin,cylinder_center,height_bearing, radius,half_height,set_to_empty); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { struct Tsdf { enum { CTA_SIZE_X = 32, CTA_SIZE_Y = 8, MAX_WEIGHT = 1 << 7 }; mutable PtrStep<short2> volume; float3 cell_size; Intr intr; Mat33 Rcurr_inv; float3 tcurr; PtrStepSz<ushort> depth_raw; //depth in mm float tranc_dist_mm; __device__ __forceinline__ float3 getVoxelGCoo (int x, int y, int z) const { float3 coo = make_float3 (x, y, z); coo += 0.5f; //shift to cell center; coo.x *= cell_size.x; coo.y *= cell_size.y; coo.z *= cell_size.z; return coo; } __device__ __forceinline__ void operator () () const { int x = threadIdx.x + blockIdx.x * CTA_SIZE_X; int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(*pos); for (int z = 0; z < VOLUME_Z; ++z, pos += elem_step) { float3 v_g = getVoxelGCoo (x, y, z); //3 // p //tranform to curr cam coo space float3 v = Rcurr_inv * (v_g - tcurr); //4 int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = 1000 * norm (tcurr - v_g) * lambda_inv - Dp; //mm sdf *= (-1); if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } } }; template<typename T> __global__ void uploadKnownToTSDFSliceKernel (PtrStep<T> volume, pcl::gpu::kinfuLS::tsdf_buffer buffer, int3 minBounds, int3 maxBounds, PtrStep<short> known_status) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //compute relative indices int idX, idY; if(x < minBounds.x) idX = x + buffer.voxels_size.x; else idX = x; if(y < minBounds.y) idY = y + buffer.voxels_size.y; else idY = y; if ( x < buffer.voxels_size.x && y < buffer.voxels_size.y) { if( (idX >= minBounds.x && idX < maxBounds.x) || 
(idY >= minBounds.y && idY < maxBounds.y) ) { // BLACK ZONE => clear on all Z values ///Move along z axis #pragma unroll for(int z = 0; z < buffer.voxels_size.z; ++z) { T *pos = volume.ptr(y + z * buffer.voxels_size.y) + x; short * ks = known_status.ptr(y + z * buffer.voxels_size.y) + x; const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ { ///RED ZONE => clear only appropriate Z int idZ = minBounds.z; if (maxBounds.z < 0) idZ += maxBounds.z; if (idZ < 0) idZ += buffer.voxels_size.z; int nbSteps = abs(maxBounds.z); #pragma unroll for(int z = 0; z < nbSteps; ++z) { ///If we went outside of the memory, make sure we go back to the begining of it if(idZ + z >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; T *pos = volume.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; short * ks = known_status.ptr(y + (idZ + z) * buffer.voxels_size.y) + x; const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } //else /* if( idX > maxBounds.x && idY > maxBounds.y)*/ } // if ( x < VOLUME_X && y < VOLUME_Y) } // uploadKnownToTSDFSliceKernel __global__ void integrateTsdfKernel (const Tsdf tsdf) { tsdf (); } __global__ void tsdf2 (PtrStep<short2> volume, const float tranc_dist_mm, const Mat33 Rcurr_inv, float3 tcurr, const Intr intr, const PtrStepSz<ushort> depth_raw, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; short2 *pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_x = Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z; float v_y = Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z; float v_z = Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z; //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z) { float3 vr; vr.x = v_g_x; vr.y = v_g_y; vr.z = (v_g_z + z * cell_size.z); float3 v; v.x = v_x + Rcurr_inv.data[0].z * z * cell_size.z; v.y = v_y + Rcurr_inv.data[1].z * z * cell_size.z; v.z = v_z + Rcurr_inv.data[2].z * z * cell_size.z; int2 coo; //project to current cam coo.x = __float2int_rn (v.x * intr.fx / v.z + intr.cx); coo.y = __float2int_rn (v.y * intr.fy / v.z + intr.cy); if (v.z > 0 && coo.x >= 0 && coo.y >= 0 && coo.x < depth_raw.cols && coo.y < depth_raw.rows) //6 { int Dp = depth_raw.ptr (coo.y)[coo.x]; //mm if (Dp != 0) { float xl = (coo.x - intr.cx) / intr.fx; float yl = (coo.y - intr.cy) / intr.fy; float lambda_inv = rsqrtf (xl * xl + yl * yl + 1); float sdf = Dp - norm (vr) * lambda_inv * 1000; //mm if (sdf >= -tranc_dist_mm) { float tsdf = fmin (1.f, sdf / tranc_dist_mm); int weight_prev; float tsdf_prev; //read and unpack unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } pos += elem_step; } /* for(int z = 0; z < VOLUME_Z; ++z) */ } /* __global__ */ 
////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth_raw, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume) { Tsdf tsdf; tsdf.volume = volume; tsdf.cell_size.x = volume_size.x / VOLUME_X; tsdf.cell_size.y = volume_size.y / VOLUME_Y; tsdf.cell_size.z = volume_size.z / VOLUME_Z; tsdf.intr = intr; tsdf.Rcurr_inv = Rcurr_inv; tsdf.tcurr = tcurr; tsdf.depth_raw = depth_raw; tsdf.tranc_dist_mm = tranc_dist*1000; //mm dim3 block (Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y)); #if 0 //tsdf2<<<grid, block>>>(volume, tranc_dist, Rcurr_inv, tcurr, intr, depth_raw, tsdf.cell_size); integrateTsdfKernel<<<grid, block>>>(tsdf); #endif cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } } namespace pcl { namespace device { namespace kinfuLS { __global__ void scaleDepth (const PtrStepSz<ushort> depth, PtrStep<float> scaled, const Intr intr) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= depth.cols || y >= depth.rows) return; int Dp = depth.ptr (y)[x]; float xl = (x - intr.cx) / intr.fx; float yl = (y - intr.cy) / intr.fy; float lambda = sqrtf (xl * xl + yl * yl + 1); scaled.ptr (y)[x] = Dp * lambda/1000.f; //meters } __global__ void tsdf23 (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= buffer.voxels_size.x - buffer.voxels_volume_padding.x || y >= buffer.voxels_size.y - buffer.voxels_volume_padding.y) return; if (x < buffer.voxels_volume_padding.x || y < buffer.voxels_volume_padding.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; int idX = x + buffer.origin_GRID.x; if (idX >= buffer.voxels_size.x) idX -= buffer.voxels_size.x; int idY = y + buffer.origin_GRID.y; if (idY >= buffer.voxels_size.y) idY -= buffer.voxels_size.y; //#pragma unroll for (int z = buffer.voxels_volume_padding.z; z < buffer.voxels_size.z - buffer.voxels_volume_padding.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory int idZ = z + buffer.origin_GRID.z; if (idZ >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; short2* pos = volume.ptr (buffer.voxels_size.y * idZ + idY) + idX; float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) 
continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23_only_empty (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size, const pcl::gpu::kinfuLS::tsdf_buffer buffer) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= buffer.voxels_size.x - buffer.voxels_volume_padding.x || y >= buffer.voxels_size.y - buffer.voxels_volume_padding.y) return; if (x < buffer.voxels_volume_padding.x || y < buffer.voxels_volume_padding.y) return; float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; int idX = x + buffer.origin_GRID.x; if (idX >= buffer.voxels_size.x) idX -= buffer.voxels_size.x; int idY = y + buffer.origin_GRID.y; if (idY >= buffer.voxels_size.y) idY -= buffer.voxels_size.y; //#pragma unroll for (int z = buffer.voxels_volume_padding.z; z < buffer.voxels_size.z - buffer.voxels_volume_padding.z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled) { // As the pointer is incremented in the for loop, we have to make sure that the pointer is never outside the memory int idZ = z + buffer.origin_GRID.z; if (idZ >= buffer.voxels_size.z) idZ -= buffer.voxels_size.z; short2* pos = volume.ptr (buffer.voxels_size.y * idZ + idY) + idX; float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= tranc_dist) //meters { float tsdf = 1.0f; //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, 
*pos); } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ __global__ void tsdf23normal_hack (const PtrStepSz<float> depthScaled, PtrStep<short2> volume, const float tranc_dist, const Mat33 Rcurr_inv, const float3 tcurr, const Intr intr, const float3 cell_size) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= VOLUME_X || y >= VOLUME_Y) return; const float v_g_x = (x + 0.5f) * cell_size.x - tcurr.x; const float v_g_y = (y + 0.5f) * cell_size.y - tcurr.y; float v_g_z = (0 + 0.5f) * cell_size.z - tcurr.z; float v_g_part_norm = v_g_x * v_g_x + v_g_y * v_g_y; float v_x = (Rcurr_inv.data[0].x * v_g_x + Rcurr_inv.data[0].y * v_g_y + Rcurr_inv.data[0].z * v_g_z) * intr.fx; float v_y = (Rcurr_inv.data[1].x * v_g_x + Rcurr_inv.data[1].y * v_g_y + Rcurr_inv.data[1].z * v_g_z) * intr.fy; float v_z = (Rcurr_inv.data[2].x * v_g_x + Rcurr_inv.data[2].y * v_g_y + Rcurr_inv.data[2].z * v_g_z); float z_scaled = 0; float Rcurr_inv_0_z_scaled = Rcurr_inv.data[0].z * cell_size.z * intr.fx; float Rcurr_inv_1_z_scaled = Rcurr_inv.data[1].z * cell_size.z * intr.fy; float tranc_dist_inv = 1.0f / tranc_dist; short2* pos = volume.ptr (y) + x; int elem_step = volume.step * VOLUME_Y / sizeof(short2); //#pragma unroll for (int z = 0; z < VOLUME_Z; ++z, v_g_z += cell_size.z, z_scaled += cell_size.z, v_x += Rcurr_inv_0_z_scaled, v_y += Rcurr_inv_1_z_scaled, pos += elem_step) { float inv_z = 1.0f / (v_z + Rcurr_inv.data[2].z * z_scaled); if (inv_z < 0) continue; // project to current cam int2 coo = { __float2int_rn (v_x * inv_z + intr.cx), __float2int_rn (v_y * inv_z + intr.cy) }; if (coo.x >= 0 && coo.y >= 0 && coo.x < depthScaled.cols && coo.y < depthScaled.rows) //6 { float Dp_scaled = depthScaled.ptr (coo.y)[coo.x]; //meters float sdf = Dp_scaled - sqrtf (v_g_z * v_g_z + v_g_part_norm); if (Dp_scaled != 0 && sdf >= -tranc_dist) //meters { float tsdf = fmin (1.0f, sdf * tranc_dist_inv); bool integrate = true; if ((x > 0 && x < VOLUME_X-2) && (y > 0 && y < VOLUME_Y-2) && (z > 0 && z < VOLUME_Z-2)) { const float qnan = numeric_limits<float>::quiet_NaN(); float3 normal = make_float3(qnan, qnan, qnan); float Fn, Fp; int Wn = 0, Wp = 0; unpack_tsdf (*(pos + elem_step), Fn, Wn); unpack_tsdf (*(pos - elem_step), Fp, Wp); if (Wn > 16 && Wp > 16) normal.z = (Fn - Fp)/cell_size.z; unpack_tsdf (*(pos + volume.step/sizeof(short2) ), Fn, Wn); unpack_tsdf (*(pos - volume.step/sizeof(short2) ), Fp, Wp); if (Wn > 16 && Wp > 16) normal.y = (Fn - Fp)/cell_size.y; unpack_tsdf (*(pos + 1), Fn, Wn); unpack_tsdf (*(pos - 1), Fp, Wp); if (Wn > 16 && Wp > 16) normal.x = (Fn - Fp)/cell_size.x; if (normal.x != qnan && normal.y != qnan && normal.z != qnan) { float norm2 = dot(normal, normal); if (norm2 >= 1e-10) { normal *= rsqrt(norm2); float nt = v_g_x * normal.x + v_g_y * normal.y + v_g_z * normal.z; float cosine = nt * rsqrt(v_g_x * v_g_x + v_g_y * v_g_y + v_g_z * v_g_z); if (cosine < 0.5) integrate = false; } } } if (integrate) { //read and unpack float tsdf_prev; int weight_prev; unpack_tsdf (*pos, tsdf_prev, weight_prev); const int Wrk = 1; float tsdf_new = (tsdf_prev * weight_prev + Wrk * tsdf) / (weight_prev + Wrk); int weight_new = min (weight_prev + Wrk, Tsdf::MAX_WEIGHT); pack_tsdf (tsdf_new, weight_new, *pos); } } } } // for(int z = 0; z < VOLUME_Z; ++z) } // __global__ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolume (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& 
volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr); cudaSafeCall ( cudaGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); tsdf23<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void integrateTsdfVolumeOnlyEmpty (const PtrStepSz<ushort>& depth, const Intr& intr, const float3& volume_size, const Mat33& Rcurr_inv, const float3& tcurr, float tranc_dist, PtrStep<short2> volume, const pcl::gpu::kinfuLS::tsdf_buffer* buffer, DeviceArray2D<float>& depthScaled) { depthScaled.create (depth.rows, depth.cols); dim3 block_scale (32, 8); dim3 grid_scale (divUp (depth.cols, block_scale.x), divUp (depth.rows, block_scale.y)); //scales depth along ray and converts mm -> meters. scaleDepth<<<grid_scale, block_scale>>>(depth, depthScaled, intr); cudaSafeCall ( cudaGetLastError () ); float3 cell_size; cell_size.x = volume_size.x / buffer->voxels_size.x; cell_size.y = volume_size.y / buffer->voxels_size.y; cell_size.z = volume_size.z / buffer->voxels_size.z; //dim3 block(Tsdf::CTA_SIZE_X, Tsdf::CTA_SIZE_Y); dim3 block (16, 16); dim3 grid (divUp (buffer->voxels_size.x, block.x), divUp (buffer->voxels_size.y, block.y)); tsdf23_only_empty<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size, *buffer); //tsdf23normal_hack<<<grid, block>>>(depthScaled, volume, tranc_dist, Rcurr_inv, tcurr, intr, cell_size); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void clearTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ) { int newX = buffer->origin_GRID.x + shiftX; int newY = buffer->origin_GRID.y + shiftY; int3 minBounds, maxBounds; //X if(newX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = newX; } else { minBounds.x = newX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(newY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = newY; } else { minBounds.y = newY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); //Z minBounds.z = buffer->origin_GRID.z; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp 
(buffer->voxels_size.y, block.y); clearSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////// void uploadKnownToTSDFSlice (PtrStep<short2> volume, pcl::gpu::kinfuLS::tsdf_buffer* buffer, int shiftX, int shiftY, int shiftZ, PtrStep<short> known_status) { int oldX = buffer->origin_GRID.x - shiftX; int oldY = buffer->origin_GRID.y - shiftY; int oldZ = buffer->origin_GRID.z - shiftZ; int3 minBounds, maxBounds; //X if(oldX >= 0) { minBounds.x = buffer->origin_GRID.x; maxBounds.x = oldX; } else { minBounds.x = oldX + buffer->voxels_size.x; maxBounds.x = buffer->origin_GRID.x + buffer->voxels_size.x; } if(minBounds.x > maxBounds.x) std::swap(minBounds.x, maxBounds.x); //Y if(oldY >= 0) { minBounds.y = buffer->origin_GRID.y; maxBounds.y = oldY; } else { minBounds.y = oldY + buffer->voxels_size.y; maxBounds.y = buffer->origin_GRID.y + buffer->voxels_size.y; } if(minBounds.y > maxBounds.y) std::swap(minBounds.y, maxBounds.y); while (oldZ < 0) oldZ += buffer->voxels_size.z; while (oldZ >= buffer->voxels_size.z) oldZ -= buffer->voxels_size.z; //Z minBounds.z = oldZ; maxBounds.z = shiftZ; // call kernel dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (buffer->voxels_size.x, block.x); grid.y = divUp (buffer->voxels_size.y, block.y); uploadKnownToTSDFSliceKernel<<<grid, block>>>(volume, *buffer, minBounds, maxBounds, known_status); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } template<typename T> __global__ void uploadKnownToBBoxKernel(PtrStep<T> volume,int3 volume_size,int3 shift,int3 m,int3 M, PtrStep<short> known_status) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int3 known_status_size; known_status_size.x = M.x - m.x; known_status_size.y = M.y - m.y; known_status_size.z = M.z - m.z; if (x < volume_size.x && y < volume_size.y) { int ax = x + shift.x; if (ax >= volume_size.x) ax -= volume_size.x; int ay = y + shift.y; if (ay >= volume_size.y) ay -= volume_size.y; T *pos = volume.ptr(ay) + ax; #pragma unroll for(int z = 0; z < volume_size.z; ++z) { int az = z + shift.z; if (az >= volume_size.z) az -= volume_size.z; float3 pt; pt.x = float(x); pt.y = float(y); pt.z = float(z); if ((pt.x >= m.x) && (pt.y >= m.y) && (pt.z >= m.z) && (pt.x < M.x) && (pt.y < M.y) && (pt.z < M.z)) { short * ks = known_status.ptr((y - m.y) + (z - m.z) * known_status_size.y) + (x - m.x); const short increment = *ks; if (increment) { float tsdf; int w; unpack_tsdf(*pos, tsdf, w); if (w == 0) tsdf = 1.0; pack_tsdf (tsdf, min(increment + w,(Tsdf::MAX_WEIGHT)), *pos); } } } } } void uploadKnownToBBox (PtrStep<short2> volume, const int3 voxels_size,const int3& origin, const int3& m,const int3& M, PtrStep<short> known_status) { dim3 block (32, 16); dim3 grid (1, 1, 1); grid.x = divUp (voxels_size.x, block.x); grid.y = divUp (voxels_size.y, block.y); uploadKnownToBBoxKernel<<<grid, block>>>(volume,voxels_size,origin,m,M,known_status); cudaSafeCall ( cudaGetLastError () ); cudaSafeCall (cudaDeviceSynchronize ()); } } } }
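The host wrappers above (integrateTsdfVolume, clearTSDFSlice, uploadKnownToTSDFSlice, uploadKnownToBBox) all size their launch grids with a divUp helper whose definition is not part of this file. A minimal sketch of the ceiling-division convention it is assumed to follow:

// Sketch of the grid-sizing helper assumed by the launches above; the real
// definition lives elsewhere in the PCL/kinfu sources.
static inline int divUp(int total, int grain)
{
  // ceiling division: smallest number of blocks of size `grain` covering `total`
  return (total + grain - 1) / grain;
}

// Example: a 640x480 depth map with the 32x8 block used for scaleDepth gives
//   dim3 grid_scale(divUp(640, 32), divUp(480, 8));   // -> (20, 60) blocks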
9f1a8d2160a42b81863703cbdecf3a240a0ecb75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Includes //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "mergesort.cuh" #include "mergesort_kernel.cu" //////////////////////////////////////////////////////////////////////////////// // Defines //////////////////////////////////////////////////////////////////////////////// #define BLOCKSIZE 256 #define ROW_LENGTH BLOCKSIZE * 4 #define ROWS 4096 //////////////////////////////////////////////////////////////////////////////// // The mergesort algorithm //////////////////////////////////////////////////////////////////////////////// float4* runMergeSort(int listsize, int divisions, float4 *d_origList, float4 *d_resultList, int *sizes, int *nullElements, unsigned int *origOffsets) { int *startaddr = (int *)malloc((divisions + 1)*sizeof(int)); int largestSize = -1; startaddr[0] = 0; for(int i=1; i<=divisions; i++) { startaddr[i] = startaddr[i-1] + sizes[i-1]; if(sizes[i-1] > largestSize) largestSize = sizes[i-1]; } largestSize *= 4; // Setup texture hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat); tex.addressMode[0] = hipAddressModeWrap; tex.addressMode[1] = hipAddressModeWrap; tex.filterMode = hipFilterModePoint; tex.normalized = false; //////////////////////////////////////////////////////////////////////////// // First sort all float4 elements internally //////////////////////////////////////////////////////////////////////////// #ifdef MERGE_WG_SIZE_0 const int THREADS = MERGE_WG_SIZE_0; #else const int THREADS = 256; #endif dim3 threads(THREADS, 1); int blocks = ((listsize/4)%THREADS == 0) ? (listsize/4)/THREADS : (listsize/4)/THREADS + 1; dim3 grid(blocks, 1); hipBindTexture(0,tex, d_origList, channelDesc, listsize*sizeof(float)); hipLaunchKernelGGL(( mergeSortFirst), dim3(grid), dim3(threads) , 0, 0, d_resultList, listsize); //////////////////////////////////////////////////////////////////////////// // Then, go level by level //////////////////////////////////////////////////////////////////////////// hipMemcpyToSymbol(constStartAddr, startaddr, (divisions + 1)*sizeof(int)); hipMemcpyToSymbol(finalStartAddr, origOffsets, (divisions + 1)*sizeof(int)); hipMemcpyToSymbol(nullElems, nullElements, (divisions)*sizeof(int)); int nrElems = 2; while(true){ int floatsperthread = (nrElems*4); int threadsPerDiv = (int)ceil(largestSize/(float)floatsperthread); int threadsNeeded = threadsPerDiv * divisions; #ifdef MERGE_WG_SIZE_1 threads.x = MERGE_WG_SIZE_1; #else threads.x = 208; #endif grid.x = ((threadsNeeded%threads.x) == 0) ? threadsNeeded/threads.x : (threadsNeeded/threads.x) + 1; if(grid.x < 8){ grid.x = 8; threads.x = ((threadsNeeded%grid.x) == 0) ? 
threadsNeeded / grid.x : (threadsNeeded / grid.x) + 1; } // Swap orig/result list float4 *tempList = d_origList; d_origList = d_resultList; d_resultList = tempList; hipBindTexture(0,tex, d_origList, channelDesc, listsize*sizeof(float)); hipLaunchKernelGGL(( mergeSortPass) , dim3(grid), dim3(threads) , 0, 0, d_resultList, nrElems, threadsPerDiv); nrElems *= 2; floatsperthread = (nrElems*4); if(threadsPerDiv == 1) break; } //////////////////////////////////////////////////////////////////////////// // Now, get rid of the NULL elements //////////////////////////////////////////////////////////////////////////// #ifdef MERGE_WG_SIZE_0 threads.x = MERGE_WG_SIZE_0; #else threads.x = 256; #endif grid.x = ((largestSize%threads.x) == 0) ? largestSize/threads.x : (largestSize/threads.x) + 1; grid.y = divisions; hipLaunchKernelGGL(( mergepack) , dim3(grid), dim3(threads) , 0, 0, (float *)d_resultList, (float *)d_origList); free(startaddr); return d_origList; }
9f1a8d2160a42b81863703cbdecf3a240a0ecb75.cu
//////////////////////////////////////////////////////////////////////////////// // Includes //////////////////////////////////////////////////////////////////////////////// #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include "mergesort.cuh" #include "mergesort_kernel.cu" //////////////////////////////////////////////////////////////////////////////// // Defines //////////////////////////////////////////////////////////////////////////////// #define BLOCKSIZE 256 #define ROW_LENGTH BLOCKSIZE * 4 #define ROWS 4096 //////////////////////////////////////////////////////////////////////////////// // The mergesort algorithm //////////////////////////////////////////////////////////////////////////////// float4* runMergeSort(int listsize, int divisions, float4 *d_origList, float4 *d_resultList, int *sizes, int *nullElements, unsigned int *origOffsets) { int *startaddr = (int *)malloc((divisions + 1)*sizeof(int)); int largestSize = -1; startaddr[0] = 0; for(int i=1; i<=divisions; i++) { startaddr[i] = startaddr[i-1] + sizes[i-1]; if(sizes[i-1] > largestSize) largestSize = sizes[i-1]; } largestSize *= 4; // Setup texture cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat); tex.addressMode[0] = cudaAddressModeWrap; tex.addressMode[1] = cudaAddressModeWrap; tex.filterMode = cudaFilterModePoint; tex.normalized = false; //////////////////////////////////////////////////////////////////////////// // First sort all float4 elements internally //////////////////////////////////////////////////////////////////////////// #ifdef MERGE_WG_SIZE_0 const int THREADS = MERGE_WG_SIZE_0; #else const int THREADS = 256; #endif dim3 threads(THREADS, 1); int blocks = ((listsize/4)%THREADS == 0) ? (listsize/4)/THREADS : (listsize/4)/THREADS + 1; dim3 grid(blocks, 1); cudaBindTexture(0,tex, d_origList, channelDesc, listsize*sizeof(float)); mergeSortFirst<<< grid, threads >>>(d_resultList, listsize); //////////////////////////////////////////////////////////////////////////// // Then, go level by level //////////////////////////////////////////////////////////////////////////// cudaMemcpyToSymbol(constStartAddr, startaddr, (divisions + 1)*sizeof(int)); cudaMemcpyToSymbol(finalStartAddr, origOffsets, (divisions + 1)*sizeof(int)); cudaMemcpyToSymbol(nullElems, nullElements, (divisions)*sizeof(int)); int nrElems = 2; while(true){ int floatsperthread = (nrElems*4); int threadsPerDiv = (int)ceil(largestSize/(float)floatsperthread); int threadsNeeded = threadsPerDiv * divisions; #ifdef MERGE_WG_SIZE_1 threads.x = MERGE_WG_SIZE_1; #else threads.x = 208; #endif grid.x = ((threadsNeeded%threads.x) == 0) ? threadsNeeded/threads.x : (threadsNeeded/threads.x) + 1; if(grid.x < 8){ grid.x = 8; threads.x = ((threadsNeeded%grid.x) == 0) ? threadsNeeded / grid.x : (threadsNeeded / grid.x) + 1; } // Swap orig/result list float4 *tempList = d_origList; d_origList = d_resultList; d_resultList = tempList; cudaBindTexture(0,tex, d_origList, channelDesc, listsize*sizeof(float)); mergeSortPass <<< grid, threads >>>(d_resultList, nrElems, threadsPerDiv); nrElems *= 2; floatsperthread = (nrElems*4); if(threadsPerDiv == 1) break; } //////////////////////////////////////////////////////////////////////////// // Now, get rid of the NULL elements //////////////////////////////////////////////////////////////////////////// #ifdef MERGE_WG_SIZE_0 threads.x = MERGE_WG_SIZE_0; #else threads.x = 256; #endif grid.x = ((largestSize%threads.x) == 0) ? 
largestSize/threads.x : (largestSize/threads.x) + 1; grid.y = divisions; mergepack <<< grid, threads >>> ((float *)d_resultList, (float *)d_origList); free(startaddr); return d_origList; }
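Both the HIP and CUDA versions of runMergeSort bind d_origList to a texture reference named tex that is declared in mergesort_kernel.cu, which is not shown here. A plausible declaration consistent with the float4 channel descriptor set up above (an assumption, not the original source) is:

// Hypothetical legacy texture-reference declaration matching the
// cudaBindTexture / cudaCreateChannelDesc(32, 32, 32, 32, float) usage above;
// the actual declaration lives in mergesort_kernel.cu.
texture<float4, 1, cudaReadModeElementType> tex;

// Kernels would then sample the bound list with the legacy fetch intrinsic:
//   float4 v = tex1Dfetch(tex, elementIndex);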
fe153a14ed926d6e70fe7debdf223dda9e863f9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> __global__ void Reduce(int* in_data, int* out_data) { extern __shared__ int shared_data[]; unsigned int tid = threadIdx.x; unsigned int index = blockIdx.x * blockDim.x * 2 + threadIdx.x; shared_data[tid] = in_data[index] + in_data[index + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { shared_data[tid] += shared_data[tid + s]; } __syncthreads(); } if (tid == 0) { out_data[blockIdx.x] = shared_data[0]; } } int main() { const int block_size = 256; // __shared__ int shared_data[]; const int array_size = 1 << 22; int* h_array = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_array[i] = 1; } int* d_array; hipMalloc(&d_array, sizeof(int) * array_size); hipMemcpy(d_array, h_array, sizeof(int) * array_size, hipMemcpyHostToDevice); int num_blocks = array_size / block_size / 2; int* d_blocksum; hipMalloc(&d_blocksum, sizeof(int) * num_blocks); int* h_blocksum = new int[num_blocks]; hipEvent_t start; hipEvent_t stop; // Creating event hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( Reduce), dim3(num_blocks), dim3(block_size), sizeof(int) * block_size, 0, d_array, d_blocksum); hipEventRecord(stop); hipMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, hipMemcpyDeviceToHost); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << " elapsed" << std::endl; int sum = 0; for (int i = 0; i < num_blocks; ++i) { sum += h_blocksum[i]; } std::cout << sum << std::endl; hipFree(d_blocksum); hipFree(d_array); delete[] h_array; delete[] h_blocksum; }
fe153a14ed926d6e70fe7debdf223dda9e863f9a.cu
#include <iostream> __global__ void Reduce(int* in_data, int* out_data) { extern __shared__ int shared_data[]; unsigned int tid = threadIdx.x; unsigned int index = blockIdx.x * blockDim.x * 2 + threadIdx.x; shared_data[tid] = in_data[index] + in_data[index + blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { shared_data[tid] += shared_data[tid + s]; } __syncthreads(); } if (tid == 0) { out_data[blockIdx.x] = shared_data[0]; } } int main() { const int block_size = 256; // __shared__ int shared_data[]; const int array_size = 1 << 22; int* h_array = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_array[i] = 1; } int* d_array; cudaMalloc(&d_array, sizeof(int) * array_size); cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice); int num_blocks = array_size / block_size / 2; int* d_blocksum; cudaMalloc(&d_blocksum, sizeof(int) * num_blocks); int* h_blocksum = new int[num_blocks]; cudaEvent_t start; cudaEvent_t stop; // Creating event cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); Reduce<<<num_blocks, block_size, sizeof(int) * block_size>>>(d_array, d_blocksum); cudaEventRecord(stop); cudaMemcpy(h_blocksum, d_blocksum, sizeof(int) * num_blocks, cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << " elapsed" << std::endl; int sum = 0; for (int i = 0; i < num_blocks; ++i) { sum += h_blocksum[i]; } std::cout << sum << std::endl; cudaFree(d_blocksum); cudaFree(d_array); delete[] h_array; delete[] h_blocksum; }
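Since every input element is initialised to 1 and each thread folds two elements into shared memory, the launch uses array_size / block_size / 2 = 8192 blocks and the final host-side loop should print 4194304 (= 1 << 22). A small sanity check along those lines, reusing the variable names from main() (a sketch, not part of the original file):

#include <cassert>

// Hypothetical check to run after the final summation loop in main():
// with all inputs equal to 1, the reduction must equal the element count.
static void check_reduction(int sum, int array_size, int num_blocks, int block_size)
{
  assert(num_blocks == array_size / block_size / 2);  // 4194304 / 256 / 2 == 8192
  assert(sum == array_size);                          // every element contributed 1
}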
9f8da0622134faca8ec655e865bc485bbef2043a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztrtri_lower.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "dtrtri.cuh" #include "dtrtri_lower_device.cuh" /******************************************************************************/ __global__ void dtrtri_diag_lower_kernel( magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA) { dtrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_dgemm16_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages); }
9f8da0622134faca8ec655e865bc485bbef2043a.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @generated from magmablas/ztrtri_lower.cu, normal z -> d, Mon Jun 25 18:24:13 2018 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar This file implements lower case, and is called by dtrtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "dtrtri.cuh" #include "dtrtri_lower_device.cuh" /******************************************************************************/ __global__ void dtrtri_diag_lower_kernel( magma_diag_t diag, int n, const double *A, int lda, double *d_dinvA) { dtrtri_diag_lower_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_dgemm16_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm16_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm16_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm16_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm32_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm32_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm32_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm64_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part1_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part1_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part2_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part2_lower_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_dgemm_above64_part3_lower_kernel( int n, const double *Ain, int lda, double *d_dinvA, int jb, int npages) { triple_dgemm_above64_part3_lower_device( n, Ain, lda, d_dinvA, jb, npages); }
d9852ab2316d3657bed2a6e7c2157aa0f5e117a8.hip
// !!! This is a file automatically generated by hipify!!! /* * Prashant Solanki (Unity: psolank) * Simple Image convolutions implementation without tiling * Convolutions mask is stored in constant memory * Tested with CUDA Toolkit 3.0 */ #include <stdio.h> #include <stdlib.h> #include <string.h> //#include <hip/hip_runtime.h> #define BUF_SIZE 200000 #define ThreadsPerBlockX 8 #define ThreadsPerBlockY 8 #define CUDA_CALL(X) if(hipSuccess != X) printf("Call Failed at %s\n",__LINE__); int count_cols(char *buff); void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2); void print2D(float *arr, int r, int c); float* alloc2D(int r, int c); void free2D(float *arr); void parse2D(FILE *f, float *arr, int r, int c); void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py); void flip_kernel(float * arr, int r, int c); // Constant cache memory to store convolution mask and its size __constant__ float dMask[100]; __constant__ int dHalfW; __constant__ int dHalfH; // kernel to convolve image with mask // one thread processes one pixel in input image __global__ void conv2DKernel(float *in, float *out, int r1, int c1) { int i,j; int x,y; int maskIndex = 0; // computing row and column of pixel for which convolution os to be done int r = blockIdx.y*blockDim.y + threadIdx.y; int c = blockIdx.x*blockDim.x + threadIdx.x; float acc = 0.0f; // accessing neighbouring pixels and multiplying with mask for(i = -dHalfH; i <= dHalfH; i++){ for(j = -dHalfW; j <= dHalfW; j++){ x = c + j; y = r + i; // condition to check if element is outside the image if(x >= 0 && x < c1 && y >= 0 && y < r1){ acc = acc + (dMask[maskIndex] * in[ y*c1 + x ]); } maskIndex++; } } // condition to check if element is outside image if(r < r1 && c < c1){ out[ r*c1 + c ] = acc; } } int main(int argc, char **argv) { float *hInput; float *hMask; float *hOutput; float *dInput; float *dOutput; int r1,c1,r2,c2, R, C; FILE *fptr; if(argc < 2) { printf(" Please specify input filename\n"); return -1;} // Finding dimensions of input matricex count_rc(argv[1],&r1, &c1, &r2, &c2); if(r1 == 0) return -1; // conputing dimensions of output matrix R = (r1 + r2) -1; C = (c1 + c2) -1; // allocating input matrices hInput = alloc2D(R, C); // zeroing the input matrix memset(hInput, 0, sizeof(float)*R*C); // allocation mask hMask = alloc2D(10, 10); // allocating output matix hOutput = alloc2D(R, C); // opening input file fptr = fopen(argv[1], "rb"); // parsing first matrix withing the padded region defined as c2/2 and r2/2 parse2DPadded(fptr, hInput, r1, c1, c2/2, r2/2); // parsing mask parse2D(fptr, hMask, r2, c2); // closing the file fclose(fptr); // flipping kernel vertically and horizontally flip_kernel(hMask, r2, c2); // print2D(hMask, r2, c2); r2 = r2/2; c2 = c2/2; // allocating gpu memory CUDA_CALL(hipMalloc((void**)&dInput, R*C*sizeof(float))); //err = hipMalloc((void**)&dMask, r2*c2*sizeof(float)); CUDA_CALL(hipMalloc((void**)&dOutput, R*C*sizeof(float))); // Copy memory to the GPU CUDA_CALL(hipMemcpy(dInput, hInput, sizeof(float)*R*C, hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpyToSymbol(dMask, hMask, sizeof(float)*10*10, 0, hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpyToSymbol(dHalfW, (const int*)&r2, sizeof(int), 0, hipMemcpyHostToDevice)); CUDA_CALL(hipMemcpyToSymbol(dHalfH, (const int*)&c2, sizeof(int), 0, hipMemcpyHostToDevice)); // Initialize the grid and block dimensions dim3 numThreads(ThreadsPerBlockX,ThreadsPerBlockY,1); dim3 numBlocks( ((C-1)/ThreadsPerBlockX)+1, ((R-1)/ThreadsPerBlockY)+1, 1 ); // Launch the GPU Kernel 
hipLaunchKernelGGL(( conv2DKernel), dim3(numBlocks), dim3(numThreads), 0, 0, dInput, dOutput, R, C); //hipDeviceSynchronize(); CUDA_CALL(hipDeviceSynchronize()); // Copy the GPU memory back to the CPU CUDA_CALL(hipMemcpy(hOutput, dOutput, R*C*sizeof(float), hipMemcpyDeviceToHost)); // free the GPU memory CUDA_CALL(hipFree(dInput)); CUDA_CALL(hipFree(dOutput)); // printing result print2D(hOutput, R, C); // free the host memory free2D(hInput); free2D(hMask); free2D(hOutput); return 0; } // count number of rows and columns for the given input file void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2) { *r1 = 0; *c1 = 0; *r2 = 0; *c2 =0; char *buff = (char*)malloc(BUF_SIZE); FILE *f = fopen(fname, "rb"); if(f == NULL){ printf("Unable to open file %s\n",fname); free(buff); return; } fgets(buff, BUF_SIZE, f); *c1 = count_cols(buff); while(strlen(buff) > 1){ (*r1)++; fgets(buff, BUF_SIZE, f); } fgets(buff, BUF_SIZE, f); *c2 = count_cols(buff); while(strlen(buff) > 1){ (*r2)++; if(NULL == fgets(buff, BUF_SIZE, f)) break; if((feof(f)) && (strlen(buff) > 1) ){(*r2)++; break;} } free(buff); fclose(f); } // count number of columns in given buffer int count_cols(char *buff) { int i;int n=1; for(i=0; i<strlen(buff)-1; i++) { if(buff[i] == ' '){ if(buff[i+1] != '\n' && buff[i+1] != '\r' && buff[i+1] != ' '){ n++; } } } return n; } // print a 2D matrix void print2D(float *arr, int r, int c) { int i,j; for(i=0; i<r; i++){ for(j=0; j<c; j++){ if(j>0) printf(" "); printf("%f",arr[ i*r + j]); } printf("\n"); } } // allocate memory for matrix of size rxc float* alloc2D(int r, int c) { return (float*)malloc( r*c*sizeof(float) ); } // free memory void free2D(float *arr) { free(arr); } // parsing a matrix of size rxc void parse2D(FILE *f, float *arr, int r, int c) { int i,j; for(i=0; i<r; i++){ for(j=0; j<c; j++){ fscanf( f, "%f", &arr[ (i*c) + j] ); } } } void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py) { int i,j; int wStep = c + 2*px; int offset = py*wStep + px; for(i=0; i<r; i++){ for(j=0; j<c; j++){ fscanf( f, "%f", &arr[ offset + (i*wStep) + j] ); } } } void flip_kernel(float * arr, int r, int c) { float f; int i,j; int R = r-1; int C = c-1; for(i=0; i<=r/2; i++){ for(j=0; j<c; j++){ f = arr[i*c +j]; arr[i*c +j] = arr[(R-i)*c + (C-j)]; arr[(R-i)*c + (C-j)] = f; } } }
d9852ab2316d3657bed2a6e7c2157aa0f5e117a8.cu
/* * Prashant Solanki (Unity: psolank) * Simple Image convolutions implementation without tiling * Convolutions mask is stored in constant memory * Tested with CUDA Toolkit 3.0 */ #include <stdio.h> #include <stdlib.h> #include <string.h> //#include <cuda.h> #define BUF_SIZE 200000 #define ThreadsPerBlockX 8 #define ThreadsPerBlockY 8 #define CUDA_CALL(X) if(cudaSuccess != X) printf("Call Failed at %s\n",__LINE__); int count_cols(char *buff); void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2); void print2D(float *arr, int r, int c); float* alloc2D(int r, int c); void free2D(float *arr); void parse2D(FILE *f, float *arr, int r, int c); void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py); void flip_kernel(float * arr, int r, int c); // Constant cache memory to store convolution mask and its size __constant__ float dMask[100]; __constant__ int dHalfW; __constant__ int dHalfH; // kernel to convolve image with mask // one thread processes one pixel in input image __global__ void conv2DKernel(float *in, float *out, int r1, int c1) { int i,j; int x,y; int maskIndex = 0; // computing row and column of pixel for which convolution os to be done int r = blockIdx.y*blockDim.y + threadIdx.y; int c = blockIdx.x*blockDim.x + threadIdx.x; float acc = 0.0f; // accessing neighbouring pixels and multiplying with mask for(i = -dHalfH; i <= dHalfH; i++){ for(j = -dHalfW; j <= dHalfW; j++){ x = c + j; y = r + i; // condition to check if element is outside the image if(x >= 0 && x < c1 && y >= 0 && y < r1){ acc = acc + (dMask[maskIndex] * in[ y*c1 + x ]); } maskIndex++; } } // condition to check if element is outside image if(r < r1 && c < c1){ out[ r*c1 + c ] = acc; } } int main(int argc, char **argv) { float *hInput; float *hMask; float *hOutput; float *dInput; float *dOutput; int r1,c1,r2,c2, R, C; FILE *fptr; if(argc < 2) { printf(" Please specify input filename\n"); return -1;} // Finding dimensions of input matricex count_rc(argv[1],&r1, &c1, &r2, &c2); if(r1 == 0) return -1; // conputing dimensions of output matrix R = (r1 + r2) -1; C = (c1 + c2) -1; // allocating input matrices hInput = alloc2D(R, C); // zeroing the input matrix memset(hInput, 0, sizeof(float)*R*C); // allocation mask hMask = alloc2D(10, 10); // allocating output matix hOutput = alloc2D(R, C); // opening input file fptr = fopen(argv[1], "rb"); // parsing first matrix withing the padded region defined as c2/2 and r2/2 parse2DPadded(fptr, hInput, r1, c1, c2/2, r2/2); // parsing mask parse2D(fptr, hMask, r2, c2); // closing the file fclose(fptr); // flipping kernel vertically and horizontally flip_kernel(hMask, r2, c2); // print2D(hMask, r2, c2); r2 = r2/2; c2 = c2/2; // allocating gpu memory CUDA_CALL(cudaMalloc((void**)&dInput, R*C*sizeof(float))); //err = cudaMalloc((void**)&dMask, r2*c2*sizeof(float)); CUDA_CALL(cudaMalloc((void**)&dOutput, R*C*sizeof(float))); // Copy memory to the GPU CUDA_CALL(cudaMemcpy(dInput, hInput, sizeof(float)*R*C, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpyToSymbol(dMask, hMask, sizeof(float)*10*10, 0, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpyToSymbol(dHalfW, (const int*)&r2, sizeof(int), 0, cudaMemcpyHostToDevice)); CUDA_CALL(cudaMemcpyToSymbol(dHalfH, (const int*)&c2, sizeof(int), 0, cudaMemcpyHostToDevice)); // Initialize the grid and block dimensions dim3 numThreads(ThreadsPerBlockX,ThreadsPerBlockY,1); dim3 numBlocks( ((C-1)/ThreadsPerBlockX)+1, ((R-1)/ThreadsPerBlockY)+1, 1 ); // Launch the GPU Kernel conv2DKernel<<<numBlocks, numThreads>>>(dInput, dOutput, R, C); 
//cudaDeviceSynchronize(); CUDA_CALL(cudaThreadSynchronize()); // Copy the GPU memory back to the CPU CUDA_CALL(cudaMemcpy(hOutput, dOutput, R*C*sizeof(float), cudaMemcpyDeviceToHost)); // free the GPU memory CUDA_CALL(cudaFree(dInput)); CUDA_CALL(cudaFree(dOutput)); // printing result print2D(hOutput, R, C); // free the host memory free2D(hInput); free2D(hMask); free2D(hOutput); return 0; } // count number of rows and columns for the given input file void count_rc(char *fname, int *r1, int *c1, int *r2, int *c2) { *r1 = 0; *c1 = 0; *r2 = 0; *c2 =0; char *buff = (char*)malloc(BUF_SIZE); FILE *f = fopen(fname, "rb"); if(f == NULL){ printf("Unable to open file %s\n",fname); free(buff); return; } fgets(buff, BUF_SIZE, f); *c1 = count_cols(buff); while(strlen(buff) > 1){ (*r1)++; fgets(buff, BUF_SIZE, f); } fgets(buff, BUF_SIZE, f); *c2 = count_cols(buff); while(strlen(buff) > 1){ (*r2)++; if(NULL == fgets(buff, BUF_SIZE, f)) break; if((feof(f)) && (strlen(buff) > 1) ){(*r2)++; break;} } free(buff); fclose(f); } // count number of columns in given buffer int count_cols(char *buff) { int i;int n=1; for(i=0; i<strlen(buff)-1; i++) { if(buff[i] == ' '){ if(buff[i+1] != '\n' && buff[i+1] != '\r' && buff[i+1] != ' '){ n++; } } } return n; } // print a 2D matrix void print2D(float *arr, int r, int c) { int i,j; for(i=0; i<r; i++){ for(j=0; j<c; j++){ if(j>0) printf(" "); printf("%f",arr[ i*r + j]); } printf("\n"); } } // allocate memory for matrix of size rxc float* alloc2D(int r, int c) { return (float*)malloc( r*c*sizeof(float) ); } // free memory void free2D(float *arr) { free(arr); } // parsing a matrix of size rxc void parse2D(FILE *f, float *arr, int r, int c) { int i,j; for(i=0; i<r; i++){ for(j=0; j<c; j++){ fscanf( f, "%f", &arr[ (i*c) + j] ); } } } void parse2DPadded(FILE *f, float *arr, int r, int c, int px, int py) { int i,j; int wStep = c + 2*px; int offset = py*wStep + px; for(i=0; i<r; i++){ for(j=0; j<c; j++){ fscanf( f, "%f", &arr[ offset + (i*wStep) + j] ); } } } void flip_kernel(float * arr, int r, int c) { float f; int i,j; int R = r-1; int C = c-1; for(i=0; i<=r/2; i++){ for(j=0; j<c; j++){ f = arr[i*c +j]; arr[i*c +j] = arr[(R-i)*c + (C-j)]; arr[(R-i)*c + (C-j)] = f; } } }
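One defect shared by the HIP and CUDA versions of this file: the CUDA_CALL macro passes __LINE__, an int, to a %s conversion specifier, which is undefined behaviour, and it never reports which error actually occurred. A safer variant, offered only as a sketch rather than the original definition, could be:

// Sketch of a corrected error-checking macro: capture the error code, print the
// line number with %d, and include the runtime's error string.
#define CUDA_CALL(X)                                                    \
  do {                                                                  \
    cudaError_t cuda_call_err = (X);                                    \
    if (cuda_call_err != cudaSuccess)                                   \
      printf("Call failed at line %d: %s\n", __LINE__,                  \
             cudaGetErrorString(cuda_call_err));                        \
  } while (0)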
6cebd44f79c1b563d6d862d76403db187bdd7e22.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void poli_warp(float* poli, const int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float x; if (idx < N) { x = poli[idx]; poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))- 1.0f/x + 3.0f/(x*x) + x/5.0f; } }
6cebd44f79c1b563d6d862d76403db187bdd7e22.cu
#include "includes.h" __global__ void poli_warp(float* poli, const int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float x; if (idx < N) { x = poli[idx]; poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))- 1.0f/x + 3.0f/(x*x) + x/5.0f; } poli[idx] = x; }
e82f59737d6c21ebbe32904881d64abc3f7f372b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> #include <vector> #include <memory> #include <iostream> #include <hipfft.h> #include <hipfftXt.h> #include "fft.hpp" void init_fft() { } #define checkCudaErrors( expr, exception ) \ { \ auto cuda_result = expr; \ if( cuda_result != hipSuccess ) { \ std::cout << __FILE__ << " " << __LINE__ << std::endl; \ throw exception ( hipGetErrorString( hipGetLastError() ) ); \ }\ } #define checkCuFFTErrors( expr, exception ) \ { \ auto cuda_result = expr; \ if( cuda_result != HIPFFT_SUCCESS ) { \ std::cout << __FILE__ << " " << __LINE__ << " " << int( cuda_result ) << std::endl; \ throw exception ( hipGetErrorString( hipGetLastError() ) ); \ } \ } struct fft_detail { float *window; size_t resolution; float a; float b; size_t width; const float *reference; size_t reference_batch_count; float diff; float *envelope; size_t batch_offset; }; __device__ hipfftReal input_cb( void *src, size_t offset, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % detail->resolution; size_t batch = detail->batch_offset + offset / detail->resolution; int16_t element = ((int16_t*)src)[ index + size_t( batch * batch * detail->a + batch * detail->b ) ]; return ( hipfftReal )( element/32767.f * detail->window[ index ] ); } __device__ void output_cb( void *dataOut, size_t offset, hipfftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % ( (detail->resolution/2) + 1 ); size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { float abs = cuCabsf( element ); atomicAdd( detail->envelope + batch, abs ); ( (float*)dataOut )[ index + detail->width * batch ] = abs; } } __device__ void output_log_cb( void *dataOut, size_t offset, hipfftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % ( (detail->resolution/2) + 1 ); size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const int log_value = log_value_ > 255.f ? int( 255 ) : int( log_value_ ); ( (float*)dataOut )[ index + detail->width * batch ] = value; } } __device__ void output_comp_cb( void *dataOut, size_t offset, hipfftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; const size_t index = offset % ( (detail->resolution/2) + 1 ); const size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { if( batch < detail->reference_batch_count ) { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const int log_value = log_value_ > 255.f ? int( 255 ) : int( log_value_ ); atomicAdd( &detail->diff, fabsf( value - detail->reference[ index + detail->width * batch ] ) ); } else { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const float log_value = log_value_ > 255.f ? 
255.f : float( log_value_ ); atomicAdd( &detail->diff, value ); } } } __device__ cufftCallbackLoadR input_cb_ptr_d = input_cb; __device__ cufftCallbackStoreC output_cb_ptr_d = output_cb; __device__ cufftCallbackStoreC output_log_cb_ptr_d = output_log_cb; __device__ cufftCallbackStoreC output_comp_cb_ptr_d = output_comp_cb; __global__ void generate_window( fft_detail *detail ) { size_t index = threadIdx.x + blockIdx.x * 1024; detail->window[ index ] = __sinf( float( M_PI ) * float( index ) / detail->resolution ); } __global__ void generate_window( float *window, unsigned int resolution ) { size_t index = threadIdx.x + blockIdx.x * 1024; window[ index ] = __sinf( float( M_PI ) * float( index ) / resolution ); } __global__ void add_lacking_batches( fft_detail *detail, size_t offset ) { size_t index = threadIdx.x + blockIdx.x * 1024u + offset; atomicAdd( &detail->diff, float( detail->reference[ index ] ) ); } using window_list_t = boost::container::flat_map< unsigned int, std::shared_ptr< float > >; window_list_t generate_window() { window_list_t result; for( unsigned int i = 16u; i != 65536u; i <<= 1 ) { float *window; checkCudaErrors( hipMalloc( &window, sizeof(float)*i ), fft_allocation_failed ); std::shared_ptr< float > wrapped( window, &hipFree ); if( i > 1024 )hipLaunchKernelGGL(( generate_window), dim3(i/1024), dim3(1024) , 0, 0, window, i ); elsehipLaunchKernelGGL(( generate_window), dim3(1), dim3(i) , 0, 0, window, i ); result.insert( result.end(), std::make_pair( i, wrapped ) ); } checkCudaErrors( hipDeviceSynchronize(), fft_initialization_failed ); return std::move( result ); } std::pair< std::vector< float >, std::shared_ptr< float > > fftref( const window_list_t &window, const std::vector< int16_t > &data, size_t resolution, float a, float b, size_t width ) { const auto window_iter = window.find( resolution ); if( window_iter == window.end() ) throw fft_initialization_failed( "invalid resolution" ); const float c = data.size() - resolution; const float x = ( -b + sqrtf( b*b + 4.f * a * c ) ) / ( 2.f * a ); const size_t batch = size_t( x ) == 0u ? 
1u : size_t( x ); float *envelope_d; checkCudaErrors(hipMalloc( &envelope_d, sizeof(float)*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_envelope( envelope_d, &hipFree ); checkCudaErrors( hipMemset( envelope_d, 0, batch * sizeof(float) ), fft_initialization_failed ); fft_detail *detail; checkCudaErrors(hipMallocManaged( &detail, sizeof(fft_detail),hipMemAttachGlobal), fft_allocation_failed ); std::shared_ptr< fft_detail > wrapped_detail( detail, &hipFree ); detail->resolution = resolution; detail->a = a; detail->b = b; detail->width = width; detail->window = window_iter->second.get(); detail->envelope = envelope_d; detail->batch_offset = 0u; int16_t *input; size_t input_size = ::max( size_t(a*batch*batch+b*batch)+resolution, data.size() ); checkCudaErrors(hipMalloc( &input, sizeof(int16_t)*input_size), fft_allocation_failed ); std::shared_ptr< int16_t > wrapped_input( input, &hipFree ); checkCudaErrors(hipMemcpy( input, data.data(), sizeof(int16_t)*data.size(), hipMemcpyHostToDevice ), fft_data_transfar_failed ); if( input_size > data.size() ) { checkCudaErrors( hipMemset( input + data.size(), 0, sizeof( int16_t )*( input_size - data.size() ) ), fft_initialization_failed ); } float *output; checkCudaErrors(hipMalloc( &output, sizeof(float)*width*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_output( output, &hipFree ); cufftCallbackLoadR input_cb_ptr_h; checkCudaErrors( hipMemcpyFromSymbol( &input_cb_ptr_h, input_cb_ptr_d, sizeof( cufftCallbackLoadR ) ), fft_data_transfar_failed ); cufftCallbackStoreC output_cb_ptr_h; checkCudaErrors( hipMemcpyFromSymbol( &output_cb_ptr_h, output_log_cb_ptr_d, sizeof( cufftCallbackStoreC ) ), fft_data_transfar_failed ); for( size_t batch_offset = 0u; batch_offset < batch; batch_offset += 4200u ) { std::cout << a << " " << b << " " << x << " " << batch << " " << batch_offset << " " << ::min( batch - batch_offset, size_t( 4200u ) ) << std::endl; detail->batch_offset = batch_offset; hipfftHandle plan; checkCuFFTErrors( hipfftCreate( &plan ), fft_initialization_failed ); int signal_size = resolution; size_t work_size; checkCuFFTErrors( hipfftMakePlanMany( plan, 1, &signal_size, 0, 0, 0, 0, 0, 0, HIPFFT_R2C, ::min( batch - batch_offset, size_t( 4200u ) ), &work_size ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&input_cb_ptr_h, CUFFT_CB_LD_REAL, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&output_cb_ptr_h, CUFFT_CB_ST_COMPLEX, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( hipfftExecR2C( plan, (hipfftReal*)input, (hipfftComplex *)output ), fft_execution_failed ); checkCudaErrors( hipDeviceSynchronize(), fft_execution_failed ); checkCuFFTErrors( hipfftDestroy( plan ), fft_allocation_failed ); } std::vector< float > envelope_h( batch ); checkCudaErrors( hipMemcpy( envelope_h.data(), detail->envelope, sizeof(float)*batch, hipMemcpyDeviceToHost ), fft_data_transfar_failed ); return std::make_pair( std::move( envelope_h ), std::move( wrapped_output ) ); } std::pair< float, std::vector< float > > fftcomp( const float *ref, size_t reference_batch_count, const window_list_t &window, const std::vector< int16_t > &data, size_t resolution, float a, float b, size_t width ) { const auto window_iter = window.find( resolution ); if( window_iter == window.end() ) throw fft_initialization_failed( "invalid resolution" ); const float c = ( data.size() < resolution ) ? 
0.f : float( data.size() - resolution ); const float x = ( -b + sqrtf( b*b + 4.f * a * c ) ) / ( 2.f * a ); const size_t batch = size_t( x ) == 0u ? 1u : size_t( x ); //const size_t batch = data.size() > resolution ? ( data.size() - resolution )/interval + 1 : 1u; float *envelope_d; checkCudaErrors(hipMalloc( &envelope_d, sizeof(float)*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_envelope( envelope_d, &hipFree ); checkCudaErrors( hipMemset( envelope_d, 0, batch * sizeof(float) ), fft_initialization_failed ); fft_detail *detail; checkCudaErrors(hipMallocManaged( &detail, sizeof(fft_detail),hipMemAttachGlobal), fft_allocation_failed ); std::shared_ptr< fft_detail > wrapped_detail( detail, &hipFree ); detail->resolution = resolution; detail->a = a; detail->b = b; detail->width = width; detail->window = window_iter->second.get(); detail->envelope = envelope_d; detail->reference = ref; detail->reference_batch_count = reference_batch_count; detail->diff = 0.0f; detail->batch_offset = 0u; int16_t *input; size_t input_size = ::max( size_t(a*batch*batch+b*batch)+resolution, data.size() ); //size_t input_size = ::max( data.size(), batch*interval ); //std::cout << a << " " << b << " " << c << " " << data.size() << " " << resolution << " " << x << " " << batch << " " << input_size << std::endl; checkCudaErrors(hipMallocManaged( &input, sizeof(int16_t)*input_size ), fft_allocation_failed ); std::shared_ptr< int16_t > wrapped_input( input, &hipFree ); checkCudaErrors(hipMemcpy( input, data.data(), sizeof(int16_t)*data.size(), hipMemcpyHostToDevice ), fft_data_transfar_failed ); if( input_size > data.size() ) { checkCudaErrors( hipMemset( input + data.size(), 0, sizeof( int16_t )*( input_size - data.size() ) ), fft_initialization_failed ); } cufftCallbackLoadR input_cb_ptr_h; checkCudaErrors( hipMemcpyFromSymbol( &input_cb_ptr_h, input_cb_ptr_d, sizeof( cufftCallbackLoadR ) ), fft_data_transfar_failed ); cufftCallbackStoreC output_cb_ptr_h; checkCudaErrors( hipMemcpyFromSymbol( &output_cb_ptr_h, output_comp_cb_ptr_d, sizeof( cufftCallbackStoreC ) ), fft_data_transfar_failed ); for( size_t batch_offset = 0u; batch_offset < batch; batch_offset += 4200u ) { detail->batch_offset = batch_offset; hipfftHandle plan; checkCuFFTErrors( hipfftCreate( &plan ), fft_initialization_failed ); int signal_size = resolution; size_t work_size; checkCuFFTErrors( hipfftMakePlanMany( plan, 1, &signal_size, 0, 0, 0, 0, 0, 0, HIPFFT_R2C, ::min( batch - batch_offset, size_t( 4200u ) ), &work_size ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&input_cb_ptr_h, CUFFT_CB_LD_REAL, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&output_cb_ptr_h, CUFFT_CB_ST_COMPLEX, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( hipfftExecR2C( plan, (hipfftReal*)input, (hipfftComplex *)nullptr ), fft_execution_failed ); checkCudaErrors( hipDeviceSynchronize(), fft_execution_failed ); checkCuFFTErrors( hipfftDestroy( plan ), fft_allocation_failed ); } if( batch < reference_batch_count ) { size_t left_count = ( reference_batch_count - batch ) * width; size_t left_block = left_count / 1024u; size_t left_mod = left_count % 1024u; if( left_block ) hipLaunchKernelGGL(( add_lacking_batches), dim3(left_block), dim3(1024u) , 0, 0, detail, size_t( batch * width ) ); if( left_mod ) hipLaunchKernelGGL(( add_lacking_batches), dim3(1u), dim3(left_mod) , 0, 0, detail, size_t( batch * width + left_block * 1024u ) ); } checkCudaErrors( 
hipDeviceSynchronize(), fft_execution_failed ); std::vector< float > envelope_h( batch ); checkCudaErrors( hipMemcpy( envelope_h.data(), detail->envelope, sizeof(float)*batch, hipMemcpyDeviceToHost ), fft_data_transfar_failed ); //std::cout << detail->diff << std::endl; return std::make_pair( detail->diff, std::move( envelope_h ) ); }
e82f59737d6c21ebbe32904881d64abc3f7f372b.cu
#include <cstdint> #include <vector> #include <memory> #include <iostream> #include <cufft.h> #include <cufftXt.h> #include "fft.hpp" void init_fft() { } #define checkCudaErrors( expr, exception ) \ { \ auto cuda_result = expr; \ if( cuda_result != cudaSuccess ) { \ std::cout << __FILE__ << " " << __LINE__ << std::endl; \ throw exception ( cudaGetErrorString( cudaGetLastError() ) ); \ }\ } #define checkCuFFTErrors( expr, exception ) \ { \ auto cuda_result = expr; \ if( cuda_result != CUFFT_SUCCESS ) { \ std::cout << __FILE__ << " " << __LINE__ << " " << int( cuda_result ) << std::endl; \ throw exception ( cudaGetErrorString( cudaGetLastError() ) ); \ } \ } struct fft_detail { float *window; size_t resolution; float a; float b; size_t width; const float *reference; size_t reference_batch_count; float diff; float *envelope; size_t batch_offset; }; __device__ cufftReal input_cb( void *src, size_t offset, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % detail->resolution; size_t batch = detail->batch_offset + offset / detail->resolution; int16_t element = ((int16_t*)src)[ index + size_t( batch * batch * detail->a + batch * detail->b ) ]; return ( cufftReal )( element/32767.f * detail->window[ index ] ); } __device__ void output_cb( void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % ( (detail->resolution/2) + 1 ); size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { float abs = cuCabsf( element ); atomicAdd( detail->envelope + batch, abs ); ( (float*)dataOut )[ index + detail->width * batch ] = abs; } } __device__ void output_log_cb( void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; size_t index = offset % ( (detail->resolution/2) + 1 ); size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const int log_value = log_value_ > 255.f ? int( 255 ) : int( log_value_ ); ( (float*)dataOut )[ index + detail->width * batch ] = value; } } __device__ void output_comp_cb( void *dataOut, size_t offset, cufftComplex element, void *callerInfo, void *sharedPtr ) { fft_detail *detail = (fft_detail*)callerInfo; const size_t index = offset % ( (detail->resolution/2) + 1 ); const size_t batch = detail->batch_offset + offset / ( (detail->resolution/2) + 1 ); if( index < detail->width ) { if( batch < detail->reference_batch_count ) { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const int log_value = log_value_ > 255.f ? int( 255 ) : int( log_value_ ); atomicAdd( &detail->diff, fabsf( value - detail->reference[ index + detail->width * batch ] ) ); } else { const float value = cuCabsf( element ); atomicAdd( detail->envelope + batch, value ); //const float log_value_ = 80.f * __log10f( value < 1.f ? 1.f : value ); //const float log_value = log_value_ > 255.f ? 
255.f : float( log_value_ ); atomicAdd( &detail->diff, value ); } } } __device__ cufftCallbackLoadR input_cb_ptr_d = input_cb; __device__ cufftCallbackStoreC output_cb_ptr_d = output_cb; __device__ cufftCallbackStoreC output_log_cb_ptr_d = output_log_cb; __device__ cufftCallbackStoreC output_comp_cb_ptr_d = output_comp_cb; __global__ void generate_window( fft_detail *detail ) { size_t index = threadIdx.x + blockIdx.x * 1024; detail->window[ index ] = __sinf( float( M_PI ) * float( index ) / detail->resolution ); } __global__ void generate_window( float *window, unsigned int resolution ) { size_t index = threadIdx.x + blockIdx.x * 1024; window[ index ] = __sinf( float( M_PI ) * float( index ) / resolution ); } __global__ void add_lacking_batches( fft_detail *detail, size_t offset ) { size_t index = threadIdx.x + blockIdx.x * 1024u + offset; atomicAdd( &detail->diff, float( detail->reference[ index ] ) ); } using window_list_t = boost::container::flat_map< unsigned int, std::shared_ptr< float > >; window_list_t generate_window() { window_list_t result; for( unsigned int i = 16u; i != 65536u; i <<= 1 ) { float *window; checkCudaErrors( cudaMalloc( &window, sizeof(float)*i ), fft_allocation_failed ); std::shared_ptr< float > wrapped( window, &cudaFree ); if( i > 1024 ) generate_window<<< i/1024, 1024 >>>( window, i ); else generate_window<<< 1, i >>>( window, i ); result.insert( result.end(), std::make_pair( i, wrapped ) ); } checkCudaErrors( cudaDeviceSynchronize(), fft_initialization_failed ); return std::move( result ); } std::pair< std::vector< float >, std::shared_ptr< float > > fftref( const window_list_t &window, const std::vector< int16_t > &data, size_t resolution, float a, float b, size_t width ) { const auto window_iter = window.find( resolution ); if( window_iter == window.end() ) throw fft_initialization_failed( "invalid resolution" ); const float c = data.size() - resolution; const float x = ( -b + sqrtf( b*b + 4.f * a * c ) ) / ( 2.f * a ); const size_t batch = size_t( x ) == 0u ? 
1u : size_t( x ); float *envelope_d; checkCudaErrors(cudaMalloc( &envelope_d, sizeof(float)*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_envelope( envelope_d, &cudaFree ); checkCudaErrors( cudaMemset( envelope_d, 0, batch * sizeof(float) ), fft_initialization_failed ); fft_detail *detail; checkCudaErrors(cudaMallocManaged( &detail, sizeof(fft_detail),cudaMemAttachGlobal), fft_allocation_failed ); std::shared_ptr< fft_detail > wrapped_detail( detail, &cudaFree ); detail->resolution = resolution; detail->a = a; detail->b = b; detail->width = width; detail->window = window_iter->second.get(); detail->envelope = envelope_d; detail->batch_offset = 0u; int16_t *input; size_t input_size = std::max( size_t(a*batch*batch+b*batch)+resolution, data.size() ); checkCudaErrors(cudaMalloc( &input, sizeof(int16_t)*input_size), fft_allocation_failed ); std::shared_ptr< int16_t > wrapped_input( input, &cudaFree ); checkCudaErrors(cudaMemcpy( input, data.data(), sizeof(int16_t)*data.size(), cudaMemcpyHostToDevice ), fft_data_transfar_failed ); if( input_size > data.size() ) { checkCudaErrors( cudaMemset( input + data.size(), 0, sizeof( int16_t )*( input_size - data.size() ) ), fft_initialization_failed ); } float *output; checkCudaErrors(cudaMalloc( &output, sizeof(float)*width*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_output( output, &cudaFree ); cufftCallbackLoadR input_cb_ptr_h; checkCudaErrors( cudaMemcpyFromSymbol( &input_cb_ptr_h, input_cb_ptr_d, sizeof( cufftCallbackLoadR ) ), fft_data_transfar_failed ); cufftCallbackStoreC output_cb_ptr_h; checkCudaErrors( cudaMemcpyFromSymbol( &output_cb_ptr_h, output_log_cb_ptr_d, sizeof( cufftCallbackStoreC ) ), fft_data_transfar_failed ); for( size_t batch_offset = 0u; batch_offset < batch; batch_offset += 4200u ) { std::cout << a << " " << b << " " << x << " " << batch << " " << batch_offset << " " << std::min( batch - batch_offset, size_t( 4200u ) ) << std::endl; detail->batch_offset = batch_offset; cufftHandle plan; checkCuFFTErrors( cufftCreate( &plan ), fft_initialization_failed ); int signal_size = resolution; size_t work_size; checkCuFFTErrors( cufftMakePlanMany( plan, 1, &signal_size, 0, 0, 0, 0, 0, 0, CUFFT_R2C, std::min( batch - batch_offset, size_t( 4200u ) ), &work_size ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&input_cb_ptr_h, CUFFT_CB_LD_REAL, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&output_cb_ptr_h, CUFFT_CB_ST_COMPLEX, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftExecR2C( plan, (cufftReal*)input, (cufftComplex *)output ), fft_execution_failed ); checkCudaErrors( cudaDeviceSynchronize(), fft_execution_failed ); checkCuFFTErrors( cufftDestroy( plan ), fft_allocation_failed ); } std::vector< float > envelope_h( batch ); checkCudaErrors( cudaMemcpy( envelope_h.data(), detail->envelope, sizeof(float)*batch, cudaMemcpyDeviceToHost ), fft_data_transfar_failed ); return std::make_pair( std::move( envelope_h ), std::move( wrapped_output ) ); } std::pair< float, std::vector< float > > fftcomp( const float *ref, size_t reference_batch_count, const window_list_t &window, const std::vector< int16_t > &data, size_t resolution, float a, float b, size_t width ) { const auto window_iter = window.find( resolution ); if( window_iter == window.end() ) throw fft_initialization_failed( "invalid resolution" ); const float c = ( data.size() < resolution ) ? 
0.f : float( data.size() - resolution ); const float x = ( -b + sqrtf( b*b + 4.f * a * c ) ) / ( 2.f * a ); const size_t batch = size_t( x ) == 0u ? 1u : size_t( x ); //const size_t batch = data.size() > resolution ? ( data.size() - resolution )/interval + 1 : 1u; float *envelope_d; checkCudaErrors(cudaMalloc( &envelope_d, sizeof(float)*batch), fft_allocation_failed ); std::shared_ptr< float > wrapped_envelope( envelope_d, &cudaFree ); checkCudaErrors( cudaMemset( envelope_d, 0, batch * sizeof(float) ), fft_initialization_failed ); fft_detail *detail; checkCudaErrors(cudaMallocManaged( &detail, sizeof(fft_detail),cudaMemAttachGlobal), fft_allocation_failed ); std::shared_ptr< fft_detail > wrapped_detail( detail, &cudaFree ); detail->resolution = resolution; detail->a = a; detail->b = b; detail->width = width; detail->window = window_iter->second.get(); detail->envelope = envelope_d; detail->reference = ref; detail->reference_batch_count = reference_batch_count; detail->diff = 0.0f; detail->batch_offset = 0u; int16_t *input; size_t input_size = std::max( size_t(a*batch*batch+b*batch)+resolution, data.size() ); //size_t input_size = std::max( data.size(), batch*interval ); //std::cout << a << " " << b << " " << c << " " << data.size() << " " << resolution << " " << x << " " << batch << " " << input_size << std::endl; checkCudaErrors(cudaMallocManaged( &input, sizeof(int16_t)*input_size ), fft_allocation_failed ); std::shared_ptr< int16_t > wrapped_input( input, &cudaFree ); checkCudaErrors(cudaMemcpy( input, data.data(), sizeof(int16_t)*data.size(), cudaMemcpyHostToDevice ), fft_data_transfar_failed ); if( input_size > data.size() ) { checkCudaErrors( cudaMemset( input + data.size(), 0, sizeof( int16_t )*( input_size - data.size() ) ), fft_initialization_failed ); } cufftCallbackLoadR input_cb_ptr_h; checkCudaErrors( cudaMemcpyFromSymbol( &input_cb_ptr_h, input_cb_ptr_d, sizeof( cufftCallbackLoadR ) ), fft_data_transfar_failed ); cufftCallbackStoreC output_cb_ptr_h; checkCudaErrors( cudaMemcpyFromSymbol( &output_cb_ptr_h, output_comp_cb_ptr_d, sizeof( cufftCallbackStoreC ) ), fft_data_transfar_failed ); for( size_t batch_offset = 0u; batch_offset < batch; batch_offset += 4200u ) { detail->batch_offset = batch_offset; cufftHandle plan; checkCuFFTErrors( cufftCreate( &plan ), fft_initialization_failed ); int signal_size = resolution; size_t work_size; checkCuFFTErrors( cufftMakePlanMany( plan, 1, &signal_size, 0, 0, 0, 0, 0, 0, CUFFT_R2C, std::min( batch - batch_offset, size_t( 4200u ) ), &work_size ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&input_cb_ptr_h, CUFFT_CB_LD_REAL, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftXtSetCallback( plan, (void **)&output_cb_ptr_h, CUFFT_CB_ST_COMPLEX, (void **)&detail ), fft_initialization_failed ); checkCuFFTErrors( cufftExecR2C( plan, (cufftReal*)input, (cufftComplex *)nullptr ), fft_execution_failed ); checkCudaErrors( cudaDeviceSynchronize(), fft_execution_failed ); checkCuFFTErrors( cufftDestroy( plan ), fft_allocation_failed ); } if( batch < reference_batch_count ) { size_t left_count = ( reference_batch_count - batch ) * width; size_t left_block = left_count / 1024u; size_t left_mod = left_count % 1024u; if( left_block ) add_lacking_batches<<< left_block, 1024u >>>( detail, size_t( batch * width ) ); if( left_mod ) add_lacking_batches<<< 1u, left_mod >>>( detail, size_t( batch * width + left_block * 1024u ) ); } checkCudaErrors( cudaDeviceSynchronize(), fft_execution_failed ); 
std::vector< float > envelope_h( batch ); checkCudaErrors( cudaMemcpy( envelope_h.data(), detail->envelope, sizeof(float)*batch, cudaMemcpyDeviceToHost ), fft_data_transfar_failed ); //std::cout << detail->diff << std::endl; return std::make_pair( detail->diff, std::move( envelope_h ) ); }
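The buffer sizing in the two functions above suggests that window i starts at offset a*i*i + b*i, so the batch count is taken as the truncated positive root of a*x^2 + b*x = data.size() - resolution, which is exactly what the sqrtf expression computes. A minimal host-side sketch of that sizing logic, assuming that quadratic offset scheme (the function names here are illustrative and not part of the original source):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>

    // Largest batch count x with a*x*x + b*x <= data_size - resolution (at least 1),
    // i.e. the truncated positive root of a*x^2 + b*x - c = 0.
    std::size_t batch_count( std::size_t data_size, std::size_t resolution, float a, float b ) {
        const float c = ( data_size < resolution ) ? 0.f : float( data_size - resolution );
        const float x = ( -b + std::sqrt( b*b + 4.f*a*c ) ) / ( 2.f*a );
        return std::max< std::size_t >( 1, std::size_t( x ) );
    }

    // The input buffer must cover the start of the last window plus one full window,
    // hence a*batch*batch + b*batch + resolution, but never less than the raw data.
    std::size_t input_buffer_size( std::size_t data_size, std::size_t resolution, float a, float b ) {
        const std::size_t batch = batch_count( data_size, resolution, a, b );
        return std::max( std::size_t( a*batch*batch + b*batch ) + resolution, data_size );
    }

This mirrors the x, batch and input_size expressions in both functions; the 4200-batch chunking of the cuFFT plans is independent of it and only bounds the per-plan batch size.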
90ae739d82bdacb854c0fbe92e15dd3dffe65573.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // for inner product of two vectors // or vector dot product // Jun 7, 2018 by Zheshu Wu #include "./common/book.h" #define imin(a,b) (a<b?a:b) const int N = 33 * 1024; const int threadsPerBlock = 256; __global__ void dot( float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; // increment by available numer of threads tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in THIS BLOCK __syncthreads(); // reduction of sum int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } // only need one thread to execute this assignment if (cacheIndex == 0) { c[blockIdx.x] = cache[0]; } } const int blocksPerGrid = imin( 32, (N + threadsPerBlock - 1) / threadsPerBlock); int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memoery on the CPU side a = new float[N]; b = new float[N]; partial_c = new float[blocksPerGrid]; // allocate memory on the GPU hipMalloc( (void**)&dev_a, N * sizeof(float)); hipMalloc( (void**)&dev_b, N * sizeof(float)); hipMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float)); // fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; b[i] = i * 2; } // copy the arrays 'a' and 'b' to the GPU hipMemcpy( dev_a, a, N * sizeof(float), hipMemcpyHostToDevice); hipMemcpy( dev_b, b, N * sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( dot), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c); // copy the partial result back to CPU hipMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost); // finish up on the CPU side c = 0; for(int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } #define sum_squares(x) (x*(x+1)*(2*x+1)/6) printf( " Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float) (N-1) )); // free memory on the GPU side hipFree( dev_a ); hipFree( dev_b ); hipFree( dev_partial_c ); // compute the dot product on CPU only float sum_cpu = 0; for(int i=0; i<N; i++) { sum_cpu += a[i] * b[i]; } printf("sum_cpu = %f \n", sum_cpu); // free the memeory on the CPU side delete [] a; delete [] b; delete [] partial_c; }
90ae739d82bdacb854c0fbe92e15dd3dffe65573.cu
// for inner product of two vectors // or vector dot product // Jun 7, 2018 by Zheshu Wu #include "./common/book.h" #define imin(a,b) (a<b?a:b) const int N = 33 * 1024; const int threadsPerBlock = 256; __global__ void dot( float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < N) { temp += a[tid] * b[tid]; // increment by available numer of threads tid += blockDim.x * gridDim.x; } // set the cache values cache[cacheIndex] = temp; // synchronize threads in THIS BLOCK __syncthreads(); // reduction of sum int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } // only need one thread to execute this assignment if (cacheIndex == 0) { c[blockIdx.x] = cache[0]; } } const int blocksPerGrid = imin( 32, (N + threadsPerBlock - 1) / threadsPerBlock); int main( void ) { float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; // allocate memoery on the CPU side a = new float[N]; b = new float[N]; partial_c = new float[blocksPerGrid]; // allocate memory on the GPU cudaMalloc( (void**)&dev_a, N * sizeof(float)); cudaMalloc( (void**)&dev_b, N * sizeof(float)); cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float)); // fill in the host memory with data for (int i=0; i<N; i++) { a[i] = i; b[i] = i * 2; } // copy the arrays 'a' and 'b' to the GPU cudaMemcpy( dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy( dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice); dot<<<blocksPerGrid, threadsPerBlock>>>( dev_a, dev_b, dev_partial_c); // copy the partial result back to CPU cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost); // finish up on the CPU side c = 0; for(int i=0; i<blocksPerGrid; i++) { c += partial_c[i]; } #define sum_squares(x) (x*(x+1)*(2*x+1)/6) printf( " Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float) (N-1) )); // free memory on the GPU side cudaFree( dev_a ); cudaFree( dev_b ); cudaFree( dev_partial_c ); // compute the dot product on CPU only float sum_cpu = 0; for(int i=0; i<N; i++) { sum_cpu += a[i] * b[i]; } printf("sum_cpu = %f \n", sum_cpu); // free the memeory on the CPU side delete [] a; delete [] b; delete [] partial_c; }
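One detail of the pair above worth noting: the shared-memory tree reduction halves i starting from blockDim.x / 2, so it is only correct when threadsPerBlock is a power of two (it is 256 here). A hedged variant that tolerates any block size, offered as a sketch rather than a drop-in replacement (the kernel name is illustrative):

    __global__ void dot_any_blockdim( const float *a, const float *b, float *c, int n ) {
        extern __shared__ float cache[];   // launch with blockDim.x * sizeof(float) dynamic shared memory
        int tid = threadIdx.x + blockIdx.x * blockDim.x;
        float temp = 0.0f;
        for ( ; tid < n; tid += blockDim.x * gridDim.x )
            temp += a[tid] * b[tid];
        cache[threadIdx.x] = temp;
        __syncthreads();
        // pairwise reduction that also handles non-power-of-two block sizes
        for ( int active = (int)blockDim.x; active > 1; ) {
            int half = ( active + 1 ) / 2;
            if ( threadIdx.x < half && threadIdx.x + half < active )
                cache[threadIdx.x] += cache[threadIdx.x + half];
            __syncthreads();
            active = half;
        }
        if ( threadIdx.x == 0 )
            c[blockIdx.x] = cache[0];
    }

A matching launch would be dot_any_blockdim<<<blocksPerGrid, threadsPerBlock, threadsPerBlock * sizeof(float)>>>(dev_a, dev_b, dev_partial_c, N); the host-side sum over the per-block partials stays the same.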
e27ba804650d031b834530e5b84cbca5f654bd79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef MY_LAPLACIAN_CU #define MY_LAPLACIAN_CU extern "C" __global__ void laplacian(double* y, const double* x, const long n) { long tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < n * n * n) { long i, j, k; i = tid / (n * n); j = (tid - i * n * n) / n; k = (tid - i * n * n) % n; y[tid] += x[tid] - x[((i + 1) % n) * n * n + j * n + k]; y[tid] += x[tid] - x[((i + n - 1) % n) * n * n + j * n + k]; y[tid] += x[tid] - x[i * n * n + ((j + 1) % n) * n + k]; y[tid] += x[tid] - x[i * n * n + ((j + n - 1) % n) * n + k]; y[tid] += x[tid] - x[i * n * n + j * n + ((k + 1) % n)]; y[tid] += x[tid] - x[i * n * n + j * n + ((k + n - 1) % n)]; } } #endif
e27ba804650d031b834530e5b84cbca5f654bd79.cu
#ifndef MY_LAPLACIAN_CU #define MY_LAPLACIAN_CU extern "C" __global__ void laplacian(double* y, const double* x, const long n) { long tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < n * n * n) { long i, j, k; i = tid / (n * n); j = (tid - i * n * n) / n; k = (tid - i * n * n) % n; y[tid] += x[tid] - x[((i + 1) % n) * n * n + j * n + k]; y[tid] += x[tid] - x[((i + n - 1) % n) * n * n + j * n + k]; y[tid] += x[tid] - x[i * n * n + ((j + 1) % n) * n + k]; y[tid] += x[tid] - x[i * n * n + ((j + n - 1) % n) * n + k]; y[tid] += x[tid] - x[i * n * n + j * n + ((k + 1) % n)]; y[tid] += x[tid] - x[i * n * n + j * n + ((k + n - 1) % n)]; } } #endif
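The laplacian kernel above accumulates a 6-point stencil with periodic (wrap-around) boundaries on an n*n*n grid indexed as i*n*n + j*n + k. A small host reference of the same arithmetic can be handy for validating it on small n; this is a sketch, not part of the original file, and the launch shown in the trailing comment is an assumed typical configuration:

    // Host reference: y[i,j,k] += 6*x[i,j,k] - sum of the six periodic neighbours,
    // which is algebraically identical to the six (x[tid] - neighbour) terms in the kernel.
    void laplacian_ref( double* y, const double* x, long n ) {
        for ( long i = 0; i < n; ++i )
            for ( long j = 0; j < n; ++j )
                for ( long k = 0; k < n; ++k ) {
                    const long id = i*n*n + j*n + k;
                    y[id] += 6.0 * x[id]
                           - x[((i+1)%n)*n*n + j*n + k] - x[((i+n-1)%n)*n*n + j*n + k]
                           - x[i*n*n + ((j+1)%n)*n + k] - x[i*n*n + ((j+n-1)%n)*n + k]
                           - x[i*n*n + j*n + (k+1)%n]   - x[i*n*n + j*n + (k+n-1)%n];
                }
    }
    // e.g. laplacian<<<(n*n*n + 255) / 256, 256>>>(d_y, d_x, n);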
8dc625ffd1ef55addf2e801c95f7d34a4a8510fd.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> using namespace std; /* Mirror operations */ __global__ void mirror(uchar4* inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical) { int col = blockIdx.x * blockDim.x + threadIdx.x; int stripe = blockDim.x * gridDim.x; for(int i=col; i<numRows*numCols; i=i+stripe) { unsigned char Y = 0.299 * inputChannel[i].x + 0.587 * inputChannel[i].y + 0.114 * inputChannel[i].z; if(vertical) outputChannel[i/numCols*numCols+(numCols-i%numCols)-1] = make_uchar4(Y, Y, Y, 255); else outputChannel[(numRows- (i/numCols) -1)*numCols +(i%numCols)] = make_uchar4(Y, Y, Y, 255); } } uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical) { //Creat Timing Event hipEvent_t start, stop; hipEventCreate (&start); hipEventCreate (&stop); //Set reasonable block size (i.e., number of threads per block) dim3 blockSize(9); //Calculate Grid SIze dim3 gridSize(6); //Calculate number of pixels size_t numPixels = numRows * numCols; //Allocate Memory Space on Device for output image uchar4 *d_outputImageRGBA; hipMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels); //start Timer hipEventRecord(start, 0); //Call mirror kernel. hipLaunchKernelGGL(( mirror), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical); //Stop Timer hipEventRecord(stop, 0); hipEventSynchronize(stop); hipDeviceSynchronize(); //Initialize memory on host for output uchar4* uchar4* h_out; h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels); //Copy output from device to host hipMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, hipMemcpyDeviceToHost); //Cleanup memory on device hipFree(d_inputImageRGBA); hipFree(d_outputImageRGBA); //Calculate Elapsed Time float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("GPU time = %5.2f ms\n", elapsedTime); //return h_out return h_out; }
8dc625ffd1ef55addf2e801c95f7d34a4a8510fd.cu
#include <cuda_runtime.h> #include <stdio.h> #include <iostream> using namespace std; /* Mirror operations */ __global__ void mirror(uchar4* inputChannel, uchar4* outputChannel, int numRows, int numCols, bool vertical) { int col = blockIdx.x * blockDim.x + threadIdx.x; int stripe = blockDim.x * gridDim.x; for(int i=col; i<numRows*numCols; i=i+stripe) { unsigned char Y = 0.299 * inputChannel[i].x + 0.587 * inputChannel[i].y + 0.114 * inputChannel[i].z; if(vertical) outputChannel[i/numCols*numCols+(numCols-i%numCols)-1] = make_uchar4(Y, Y, Y, 255); else outputChannel[(numRows- (i/numCols) -1)*numCols +(i%numCols)] = make_uchar4(Y, Y, Y, 255); } } uchar4* mirror_ops(uchar4 *d_inputImageRGBA, size_t numRows, size_t numCols, bool vertical) { //Creat Timing Event cudaEvent_t start, stop; cudaEventCreate (&start); cudaEventCreate (&stop); //Set reasonable block size (i.e., number of threads per block) dim3 blockSize(9); //Calculate Grid SIze dim3 gridSize(6); //Calculate number of pixels size_t numPixels = numRows * numCols; //Allocate Memory Space on Device for output image uchar4 *d_outputImageRGBA; cudaMalloc(&d_outputImageRGBA, sizeof(uchar4) * numPixels); //start Timer cudaEventRecord(start, 0); //Call mirror kernel. mirror<<<gridSize, blockSize>>>(d_inputImageRGBA, d_outputImageRGBA, numRows, numCols, vertical); //Stop Timer cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaDeviceSynchronize(); //Initialize memory on host for output uchar4* uchar4* h_out; h_out = (uchar4*)malloc(sizeof(uchar4) * numPixels); //Copy output from device to host cudaMemcpy(h_out, d_outputImageRGBA, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost); //Cleanup memory on device cudaFree(d_inputImageRGBA); cudaFree(d_outputImageRGBA); //Calculate Elapsed Time float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("GPU time = %5.2f ms\n", elapsedTime); //return h_out return h_out; }
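Despite its name, the mirror kernel in this pair does two things at once: it converts each pixel to grey with the BT.601 luma weights (0.299, 0.587, 0.114) and writes the result to the mirrored position, covering the whole image with a grid-stride loop (the hard-coded 6-block by 9-thread launch only sets the stride, not the coverage). The index arithmetic is easier to read factored out; this helper is a sketch for clarity only, not code from the file:

    // For pixel i of a numRows x numCols row-major image:
    //   vertical  mirror flips left/right  -> (row, numCols-1-col)
    //   otherwise mirror flips top/bottom  -> (numRows-1-row, col)
    __host__ __device__ inline int mirror_index( int i, int numRows, int numCols, bool vertical ) {
        const int row = i / numCols, col = i % numCols;
        return vertical ? row * numCols + ( numCols - 1 - col )
                        : ( numRows - 1 - row ) * numCols + col;
    }

With that helper the kernel body reduces to outputChannel[mirror_index(i, numRows, numCols, vertical)] = make_uchar4(Y, Y, Y, 255).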
49a52781ec1c285cf3f53d89e85fcf43af9d8e02.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaAssert.h> #include <CusparseFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> namespace NeoML { // result = first * T(second). The result size is firstHeight * secondHeight: void CCudaMathEngine::MultiplySparseMatrixByTransposedMatrix( int firstHeight, int firstWidth, int secondHeight, const CSparseMatrixDesc& firstDesc, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle ) { ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); CFloatHandleStackVar tResult( mathEngine(), firstHeight * secondHeight ); CFloatHandle tResultPtr = tResult.GetHandle(); hipsparseMatDescr_t description = 0; ASSERT_CUSPARSE( cusparse->CreateMatDescr( &description ) ); ASSERT_CUSPARSE( cusparse->SetMatType( description, HIPSPARSE_MATRIX_TYPE_GENERAL ) ); ASSERT_CUSPARSE( cusparse->SetMatIndexBase( description, HIPSPARSE_INDEX_BASE_ZERO ) ); const int* firstRows = GetRaw( firstDesc.Rows ); const float* firstValues = GetRaw( firstDesc.Values ); const int* firstColumns = GetRaw( firstDesc.Columns ); float alpha = 1.0; float beta = 0.0; ASSERT_CUSPARSE( cusparse->Scsrmm( cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, firstHeight, secondHeight, firstWidth, firstDesc.ElementCount, &alpha, description, firstValues, firstRows, firstColumns, GetRaw( secondHandle ), firstWidth, &beta, GetRaw( tResultPtr ), firstHeight ) ); ASSERT_CUSPARSE( cusparse->DestroyMatDescr( description ) ); TransposeMatrix( 1, tResultPtr, secondHeight, 1, firstHeight, 1, resultHandle, static_cast<int>( tResult.Size() ) ); } // result = result + T(first) * second. 
The result size is firstWidth * secondWidth: void CCudaMathEngine::MultiplyTransposedMatrixBySparseMatrixAndAdd( int firstHeight, int firstWidth, int secondWidth, const CConstFloatHandle& first, const CSparseMatrixDesc& secondDesc, const CFloatHandle& resultHandle ) { ASSERT_EXPR( first.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); // C = A * B = T( T(B) * A ) // Transpose first CFloatHandleStackVar tFirst( mathEngine(), firstHeight * firstWidth ); CFloatHandle tFirstPtr = tFirst.GetHandle(); TransposeMatrix( 1, first, firstHeight, 1, firstWidth, 1, tFirstPtr, static_cast<int>( tFirst.Size() ) ); hipsparseMatDescr_t description = 0; ASSERT_CUSPARSE( cusparse->CreateMatDescr( &description ) ); ASSERT_CUSPARSE( cusparse->SetMatType( description, HIPSPARSE_MATRIX_TYPE_GENERAL ) ); ASSERT_CUSPARSE( cusparse->SetMatIndexBase( description, HIPSPARSE_INDEX_BASE_ZERO ) ); // Calculate T( T(B) * A ): const int* secondRows = GetRaw( secondDesc.Rows ); const float* secondValues = GetRaw( secondDesc.Values ); const int* secondColumns = GetRaw( secondDesc.Columns ); float alpha = 1.0; float beta = 1.0; ASSERT_CUSPARSE( cusparse->Scsrmm2( cusparseHandle, HIPSPARSE_OPERATION_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, firstHeight, firstWidth, secondWidth, secondDesc.ElementCount, &alpha, description, secondValues, secondRows, secondColumns, GetRaw( tFirstPtr ), firstHeight, &beta, GetRaw( resultHandle ), secondWidth ) ); ASSERT_CUSPARSE( cusparse->DestroyMatDescr( description ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
49a52781ec1c285cf3f53d89e85fcf43af9d8e02.cu
/* Copyright © 2017-2020 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------------------------------------------------------------------*/ #include <NeoMathEngine/NeoMathEngineDefs.h> #ifdef NEOML_USE_CUDA #include <CudaMathEngine.h> #include <CudaAssert.h> #include <CusparseFunctions.h> #include <MathEngineCommon.h> #include <MemoryHandleInternal.h> namespace NeoML { // result = first * T(second). The result size is firstHeight * secondHeight: void CCudaMathEngine::MultiplySparseMatrixByTransposedMatrix( int firstHeight, int firstWidth, int secondHeight, const CSparseMatrixDesc& firstDesc, const CConstFloatHandle& secondHandle, const CFloatHandle& resultHandle ) { ASSERT_EXPR( secondHandle.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); CFloatHandleStackVar tResult( mathEngine(), firstHeight * secondHeight ); CFloatHandle tResultPtr = tResult.GetHandle(); cusparseMatDescr_t description = 0; ASSERT_CUSPARSE( cusparse->CreateMatDescr( &description ) ); ASSERT_CUSPARSE( cusparse->SetMatType( description, CUSPARSE_MATRIX_TYPE_GENERAL ) ); ASSERT_CUSPARSE( cusparse->SetMatIndexBase( description, CUSPARSE_INDEX_BASE_ZERO ) ); const int* firstRows = GetRaw( firstDesc.Rows ); const float* firstValues = GetRaw( firstDesc.Values ); const int* firstColumns = GetRaw( firstDesc.Columns ); float alpha = 1.0; float beta = 0.0; ASSERT_CUSPARSE( cusparse->Scsrmm( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, firstHeight, secondHeight, firstWidth, firstDesc.ElementCount, &alpha, description, firstValues, firstRows, firstColumns, GetRaw( secondHandle ), firstWidth, &beta, GetRaw( tResultPtr ), firstHeight ) ); ASSERT_CUSPARSE( cusparse->DestroyMatDescr( description ) ); TransposeMatrix( 1, tResultPtr, secondHeight, 1, firstHeight, 1, resultHandle, static_cast<int>( tResult.Size() ) ); } // result = result + T(first) * second. 
The result size is firstWidth * secondWidth: void CCudaMathEngine::MultiplyTransposedMatrixBySparseMatrixAndAdd( int firstHeight, int firstWidth, int secondWidth, const CConstFloatHandle& first, const CSparseMatrixDesc& secondDesc, const CFloatHandle& resultHandle ) { ASSERT_EXPR( first.GetMathEngine() == this ); ASSERT_EXPR( resultHandle.GetMathEngine() == this ); // C = A * B = T( T(B) * A ) // Transpose first CFloatHandleStackVar tFirst( mathEngine(), firstHeight * firstWidth ); CFloatHandle tFirstPtr = tFirst.GetHandle(); TransposeMatrix( 1, first, firstHeight, 1, firstWidth, 1, tFirstPtr, static_cast<int>( tFirst.Size() ) ); cusparseMatDescr_t description = 0; ASSERT_CUSPARSE( cusparse->CreateMatDescr( &description ) ); ASSERT_CUSPARSE( cusparse->SetMatType( description, CUSPARSE_MATRIX_TYPE_GENERAL ) ); ASSERT_CUSPARSE( cusparse->SetMatIndexBase( description, CUSPARSE_INDEX_BASE_ZERO ) ); // Calculate T( T(B) * A ): const int* secondRows = GetRaw( secondDesc.Rows ); const float* secondValues = GetRaw( secondDesc.Values ); const int* secondColumns = GetRaw( secondDesc.Columns ); float alpha = 1.0; float beta = 1.0; ASSERT_CUSPARSE( cusparse->Scsrmm2( cusparseHandle, CUSPARSE_OPERATION_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, firstHeight, firstWidth, secondWidth, secondDesc.ElementCount, &alpha, description, secondValues, secondRows, secondColumns, GetRaw( tFirstPtr ), firstHeight, &beta, GetRaw( resultHandle ), secondWidth ) ); ASSERT_CUSPARSE( cusparse->DestroyMatDescr( description ) ); } } // namespace NeoML #endif // NEOML_USE_CUDA
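A note on the first routine in the pair above, assuming the usual cuSPARSE convention that csrmm takes its dense operand and result in column-major order: the engine's dense matrices are row-major, so `second` (secondHeight x firstWidth, row-major) is handed to Scsrmm unchanged, where it is read as a firstWidth x secondHeight column-major matrix, i.e. as T(second) without any copy. The shape bookkeeping, written out as an annotation only:

    // first   : firstHeight x firstWidth   sparse CSR               (op = non-transpose)
    // second  : secondHeight x firstWidth  row-major  == firstWidth x secondHeight column-major
    // csrmm   : tResult = first * T(second), firstHeight x secondHeight, column-major (ld = firstHeight)
    // finally : TransposeMatrix(...) turns that column-major buffer into the row-major
    //           firstHeight x secondHeight result expected by the caller.

The second routine leans on the same row-major/column-major identity, which is why it transposes `first` up front before calling Scsrmm2.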
0d1ef62221a19c04d04deadf524556dabcedf7fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Normalize.h" #include "Normalize.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* normalized the data with normal distribution (kernel code). For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter >> input - the input data array >> output - the output data array >> mean - the mean of the input >> var - the variance of the input >> a - the scalar >> b - the bias >> epsilon - a parameter >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to go over for next block >> blockNum - how many blocks we have */ template<class T, TENSOR_DATA_TYPE datatype> __global__ void KernelNormalize(T * input, T * output, T * mean, T * var, T * a, T * b, T epsilon, int stride, int strideNum, int blockNum) { __shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int blockSize; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= stride * blockNum || j >= strideNum) return; if (threadIdx.y == 0) { iOffset[threadIdx.x] = i % stride; iBlock[threadIdx.x] = i / stride; iMean[threadIdx.x] = mean[i]; iVar[threadIdx.x] = var[i]; blockSize = stride * strideNum; } __syncthreads(); int inBlockOffset = j * stride + iOffset[threadIdx.x]; int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset; if (datatype == DEFAULT_DTYPE) { output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x])) / sqrt((DTYPE)(iVar[threadIdx.x] + epsilon)) + (DTYPE)b[inBlockOffset]; } else if (datatype == X_FLOAT16) { #if __CUDA_ARCH__ >= 600 output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])), hsqrt(iVar[threadIdx.x] + epsilon)), __float2half(b[inBlockOffset])); #endif } } /* normalized the data with normal distribution. 
For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter >> input - the input tensor >> output - the output tensor >> dim - dimension alone which we generate the mean and variance >> mean - the mean of the input >> var - the variance of the input >> a - the scalar >> b - the bias >> epsilon - a parameter */ void _CudaNormalize(const XTensor * input, XTensor * output, int dim, const XTensor * mean, const XTensor * var, const XTensor * a, const XTensor * b, DTYPE epsilon) { CheckNTErrors((input->dataType == DEFAULT_DTYPE), "TODO!"); int stride = 1; int strideNum = input->dimSize[dim]; int blockNum = 1; for (int i = 0; i < input->order; i++) { if (i > dim) stride *= input->dimSize[i]; else if (i < dim) blockNum *= input->dimSize[i]; } int cudaGridSize[3]; int cudaBlockSize[3]; GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]); dim3 threads(cudaBlockSize[1], cudaBlockSize[0]); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (input->dataType == DEFAULT_DTYPE) { KernelNormalize <DTYPE, DEFAULT_DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data, (DTYPE*)mean->data, (DTYPE*)var->data, (DTYPE*)a->data, (DTYPE*)b->data, epsilon, stride, strideNum, blockNum); } else if (input->dataType == X_FLOAT16) { #ifdef HALF_PRECISION __half epsilon1 = __float2half(epsilon); hipLaunchKernelGGL(( KernelNormalize <__half, X_FLOAT16>) , dim3(blocks), dim3(threads), 0, 0, (__half*)input->data, (__half*)output->data, (__half*)mean->data, (__half*)var->data, (__half*)a->data, (__half*)b->data, epsilon1, stride, strideNum, blockNum); #endif } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
0d1ef62221a19c04d04deadf524556dabcedf7fe.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Normalize.h" #include "Normalize.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* normalized the data with normal distribution (kernel code). For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter >> input - the input data array >> output - the output data array >> mean - the mean of the input >> var - the variance of the input >> a - the scalar >> b - the bias >> epsilon - a parameter >> stride - stride that we need to move to the next item >> strideNum - how many strides we need to go over for next block >> blockNum - how many blocks we have */ template<class T, TENSOR_DATA_TYPE datatype> __global__ void KernelNormalize(T * input, T * output, T * mean, T * var, T * a, T * b, T epsilon, int stride, int strideNum, int blockNum) { __shared__ T iMean[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T iVar[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int blockSize; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= stride * blockNum || j >= strideNum) return; if (threadIdx.y == 0) { iOffset[threadIdx.x] = i % stride; iBlock[threadIdx.x] = i / stride; iMean[threadIdx.x] = mean[i]; iVar[threadIdx.x] = var[i]; blockSize = stride * strideNum; } __syncthreads(); int inBlockOffset = j * stride + iOffset[threadIdx.x]; int offset = iBlock[threadIdx.x] * blockSize + inBlockOffset; if (datatype == DEFAULT_DTYPE) { output[offset] = (DTYPE)(a[inBlockOffset] * (input[offset] - iMean[threadIdx.x])) / sqrt((DTYPE)(iVar[threadIdx.x] + epsilon)) + (DTYPE)b[inBlockOffset]; } else if (datatype == X_FLOAT16) { #if __CUDA_ARCH__ >= 600 output[offset] = __hadd(__hdiv(__hmul(a[inBlockOffset], __hsub(input[offset], iMean[threadIdx.x])), hsqrt(iVar[threadIdx.x] + epsilon)), __float2half(b[inBlockOffset])); #endif } } /* normalized the data with normal distribution. 
For an input x, y = a * (x-mean)/sqrt(variance+\epsilon) + b where a and b are the scalar and bias respectively, and \epsilon is the adjustment parameter >> input - the input tensor >> output - the output tensor >> dim - dimension alone which we generate the mean and variance >> mean - the mean of the input >> var - the variance of the input >> a - the scalar >> b - the bias >> epsilon - a parameter */ void _CudaNormalize(const XTensor * input, XTensor * output, int dim, const XTensor * mean, const XTensor * var, const XTensor * a, const XTensor * b, DTYPE epsilon) { CheckNTErrors((input->dataType == DEFAULT_DTYPE), "TODO!"); int stride = 1; int strideNum = input->dimSize[dim]; int blockNum = 1; for (int i = 0; i < input->order; i++) { if (i > dim) stride *= input->dimSize[i]; else if (i < dim) blockNum *= input->dimSize[i]; } int cudaGridSize[3]; int cudaBlockSize[3]; GDevs.GetCudaThread2D(input->devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); dim3 blocks(cudaGridSize[1], cudaGridSize[0]); dim3 threads(cudaBlockSize[1], cudaBlockSize[0]); int devIDBackup; ProtectCudaDev(a->devID, devIDBackup); if (input->dataType == DEFAULT_DTYPE) { KernelNormalize <DTYPE, DEFAULT_DTYPE><< <blocks, threads >> >((DTYPE*)input->data, (DTYPE*)output->data, (DTYPE*)mean->data, (DTYPE*)var->data, (DTYPE*)a->data, (DTYPE*)b->data, epsilon, stride, strideNum, blockNum); } else if (input->dataType == X_FLOAT16) { #ifdef HALF_PRECISION __half epsilon1 = __float2half(epsilon); KernelNormalize <__half, X_FLOAT16> <<<blocks, threads>>> ((__half*)input->data, (__half*)output->data, (__half*)mean->data, (__half*)var->data, (__half*)a->data, (__half*)b->data, epsilon1, stride, strideNum, blockNum); #endif } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
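The per-element arithmetic of KernelNormalize is stated in its comment; written out as a scalar reference it is simply a shift-and-scale by the per-position mean and variance. A sketch (not from the original source) that can double as a CPU check:

    #include <cmath>

    // y = a * (x - mean) / sqrt(var + epsilon) + b, one element at a time;
    // in the kernel, mean/var are indexed per (block, offset) column and a/b per in-block offset.
    inline float normalize_ref( float x, float mean, float var, float a, float b, float epsilon ) {
        return a * ( x - mean ) / std::sqrt( var + epsilon ) + b;
    }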
e9b141019c3a75869286ca7904ec169bbf03fc21.hip
// !!! This is a file automatically generated by hipify!!! #include "Tensor.h" #include "Common.cuh" #include <iostream> using namespace std; extern hipError_t cudaStatus; Tensor::Tensor(vector<int> shape, float* data) : shape(shape) { // Calculate size & dims size = 1; dims = 0; for (int s : shape) { size *= s; dims += 1; } // Copy data this->data.reserve(size); for (int i = 0; i < size; i++) { float value = data == 0 ? 0 : data[i]; this->data.push_back(value); } // Upload data onto GPU CUDATensor d_data = createCudaTensor(); HE(hipMemcpy(d_data.data, &(this->data[0]), size * sizeof(float), hipMemcpyHostToDevice)); HE(hipMalloc((void**)&(cuda_data), sizeof(CUDATensor))); HE(hipMalloc((void**)&(cuda_grad), sizeof(CUDATensor))); HE(hipMalloc((void**)&(cuda_sens), sizeof(CUDATensor))); HE(hipMemcpy(cuda_data, &d_data, sizeof(CUDATensor), hipMemcpyHostToDevice)); HE(hipMemcpy(cuda_grad, &createCudaTensor(), sizeof(CUDATensor), hipMemcpyHostToDevice)); HE(hipMemcpy(cuda_sens, &createCudaTensor(), sizeof(CUDATensor), hipMemcpyHostToDevice)); } CUDATensor Tensor::createCudaTensor() { CUDATensor result; HE(hipMalloc((void**)&(result.data), size * sizeof(float))); // TODO: set 1 for the initial grad data // TODO: get rid of sensitivity tensor HE(hipMemset(result.data, 0, size * sizeof(float))); HE(hipMalloc((void**)&(result.shape), shape.size() * sizeof(int))); HE(hipMemcpy(result.shape, &(shape[0]), shape.size() * sizeof(int), hipMemcpyHostToDevice)); result.dims = dims; result.size = size; return result; } void Tensor::downloadData(CUDATensor* src) { CUDATensor temp; HE(hipMemcpy(&temp, src, sizeof(CUDATensor), hipMemcpyDeviceToHost)); HE(hipMemcpy(&data[0], temp.data, size * sizeof(float), hipMemcpyDeviceToHost)); } void Tensor::uploadData(float* data, CUDATensor* dst) { CUDATensor temp; HE(hipMemcpy(&temp, dst, sizeof(CUDATensor), hipMemcpyDeviceToHost)); HE(hipMemcpy(temp.data, data, size * sizeof(float), hipMemcpyHostToDevice)); } vector<float> Tensor::getData() { downloadData(cuda_data); return vector<float>(data); } vector<float> Tensor::getGrad() { downloadData(cuda_grad); return vector<float>(data); } vector<float> Tensor::getSens() { downloadData(cuda_sens); return vector<float>(data); } void Tensor::clear(bool only_grad) { CUDATensor temp; if (!only_grad) { HE(hipMemcpy(&temp, cuda_data, sizeof(CUDATensor), hipMemcpyDeviceToHost)); HE(hipMemset(temp.data, 0, size * sizeof(float))); } HE(hipMemcpy(&temp, cuda_grad, sizeof(CUDATensor), hipMemcpyDeviceToHost)); HE(hipMemset(temp.data, 0, size * sizeof(float))); } vector<int> Tensor::getShape() { return shape; } int Tensor::getSize() { return size; } void Tensor::sync() { HE(hipDeviceSynchronize()); } void Tensor::reset() { HE(hipDeviceReset()); } void Tensor::setData(float* data) { uploadData(data, cuda_data); } void Tensor::setGrad(float* data) { uploadData(data, cuda_grad); } void Tensor::reshapeCUDA(vector<int> new_shape, CUDATensor* dst) { CUDATensor temp; HE(hipMemcpy(&temp, dst, sizeof(CUDATensor), hipMemcpyDeviceToHost)); HE(hipFree(temp.shape)); HE(hipMalloc((void**)&(temp.shape), new_shape.size() * sizeof(int))); HE(hipMemcpy(temp.shape, &new_shape[0], new_shape.size() * sizeof(int), hipMemcpyHostToDevice)); } void Tensor::reshape(vector<int> new_shape) { int new_size = 1; for (int i : new_shape) { new_size *= i; } if (new_size != size) { // Throw exception return; } shape = new_shape; reshapeCUDA(shape, cuda_data); reshapeCUDA(shape, cuda_grad); } Tensor* Tensor::squeeze(int axis) { if (axis == -1) axis = shape.size() - 1; if 
(shape.size() < axis + 1) { // Throw exception return this; } vector<int> new_shape = shape; new_shape[axis - 1] *= new_shape[axis]; new_shape.erase(new_shape.begin() + axis); reshape(new_shape); return this; } Tensor* Tensor::unsqueeze(int axis) { if (axis == -1) axis = shape.size() - 1; if (shape.size() < axis) { // Throw exception return this; } vector<int> new_shape = shape; new_shape.insert(new_shape.begin() + axis + 1, 1); reshape(new_shape); return this; } CUDATensor* Tensor::getCudaData() { return cuda_data; } CUDATensor* Tensor::getCudaGrad() { return cuda_grad; } CUDATensor* Tensor::getCudaSens() { return cuda_sens; }
e9b141019c3a75869286ca7904ec169bbf03fc21.cu
#include "Tensor.h" #include "Common.cuh" #include <iostream> using namespace std; extern cudaError_t cudaStatus; Tensor::Tensor(vector<int> shape, float* data) : shape(shape) { // Calculate size & dims size = 1; dims = 0; for (int s : shape) { size *= s; dims += 1; } // Copy data this->data.reserve(size); for (int i = 0; i < size; i++) { float value = data == 0 ? 0 : data[i]; this->data.push_back(value); } // Upload data onto GPU CUDATensor d_data = createCudaTensor(); HE(cudaMemcpy(d_data.data, &(this->data[0]), size * sizeof(float), cudaMemcpyHostToDevice)); HE(cudaMalloc((void**)&(cuda_data), sizeof(CUDATensor))); HE(cudaMalloc((void**)&(cuda_grad), sizeof(CUDATensor))); HE(cudaMalloc((void**)&(cuda_sens), sizeof(CUDATensor))); HE(cudaMemcpy(cuda_data, &d_data, sizeof(CUDATensor), cudaMemcpyHostToDevice)); HE(cudaMemcpy(cuda_grad, &createCudaTensor(), sizeof(CUDATensor), cudaMemcpyHostToDevice)); HE(cudaMemcpy(cuda_sens, &createCudaTensor(), sizeof(CUDATensor), cudaMemcpyHostToDevice)); } CUDATensor Tensor::createCudaTensor() { CUDATensor result; HE(cudaMalloc((void**)&(result.data), size * sizeof(float))); // TODO: set 1 for the initial grad data // TODO: get rid of sensitivity tensor HE(cudaMemset(result.data, 0, size * sizeof(float))); HE(cudaMalloc((void**)&(result.shape), shape.size() * sizeof(int))); HE(cudaMemcpy(result.shape, &(shape[0]), shape.size() * sizeof(int), cudaMemcpyHostToDevice)); result.dims = dims; result.size = size; return result; } void Tensor::downloadData(CUDATensor* src) { CUDATensor temp; HE(cudaMemcpy(&temp, src, sizeof(CUDATensor), cudaMemcpyDeviceToHost)); HE(cudaMemcpy(&data[0], temp.data, size * sizeof(float), cudaMemcpyDeviceToHost)); } void Tensor::uploadData(float* data, CUDATensor* dst) { CUDATensor temp; HE(cudaMemcpy(&temp, dst, sizeof(CUDATensor), cudaMemcpyDeviceToHost)); HE(cudaMemcpy(temp.data, data, size * sizeof(float), cudaMemcpyHostToDevice)); } vector<float> Tensor::getData() { downloadData(cuda_data); return vector<float>(data); } vector<float> Tensor::getGrad() { downloadData(cuda_grad); return vector<float>(data); } vector<float> Tensor::getSens() { downloadData(cuda_sens); return vector<float>(data); } void Tensor::clear(bool only_grad) { CUDATensor temp; if (!only_grad) { HE(cudaMemcpy(&temp, cuda_data, sizeof(CUDATensor), cudaMemcpyDeviceToHost)); HE(cudaMemset(temp.data, 0, size * sizeof(float))); } HE(cudaMemcpy(&temp, cuda_grad, sizeof(CUDATensor), cudaMemcpyDeviceToHost)); HE(cudaMemset(temp.data, 0, size * sizeof(float))); } vector<int> Tensor::getShape() { return shape; } int Tensor::getSize() { return size; } void Tensor::sync() { HE(cudaDeviceSynchronize()); } void Tensor::reset() { HE(cudaDeviceReset()); } void Tensor::setData(float* data) { uploadData(data, cuda_data); } void Tensor::setGrad(float* data) { uploadData(data, cuda_grad); } void Tensor::reshapeCUDA(vector<int> new_shape, CUDATensor* dst) { CUDATensor temp; HE(cudaMemcpy(&temp, dst, sizeof(CUDATensor), cudaMemcpyDeviceToHost)); HE(cudaFree(temp.shape)); HE(cudaMalloc((void**)&(temp.shape), new_shape.size() * sizeof(int))); HE(cudaMemcpy(temp.shape, &new_shape[0], new_shape.size() * sizeof(int), cudaMemcpyHostToDevice)); } void Tensor::reshape(vector<int> new_shape) { int new_size = 1; for (int i : new_shape) { new_size *= i; } if (new_size != size) { // Throw exception return; } shape = new_shape; reshapeCUDA(shape, cuda_data); reshapeCUDA(shape, cuda_grad); } Tensor* Tensor::squeeze(int axis) { if (axis == -1) axis = shape.size() - 1; if (shape.size() < axis + 
1) { // Throw exception return this; } vector<int> new_shape = shape; new_shape[axis - 1] *= new_shape[axis]; new_shape.erase(new_shape.begin() + axis); reshape(new_shape); return this; } Tensor* Tensor::unsqueeze(int axis) { if (axis == -1) axis = shape.size() - 1; if (shape.size() < axis) { // Throw exception return this; } vector<int> new_shape = shape; new_shape.insert(new_shape.begin() + axis + 1, 1); reshape(new_shape); return this; } CUDATensor* Tensor::getCudaData() { return cuda_data; } CUDATensor* Tensor::getCudaGrad() { return cuda_grad; } CUDATensor* Tensor::getCudaSens() { return cuda_sens; }
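One portability caveat in the constructor of the pair above: the expressions &createCudaTensor() take the address of a temporary, which conforming C++ compilers reject and which only builds under an extension such as MSVC's "class rvalue used as lvalue". A standard-conforming fragment of the same upload, shown only as a sketch of the intent (it assumes the surrounding Tensor class and the HE macro):

    // Portable equivalent of the grad/sens uploads in Tensor's constructor:
    CUDATensor d_grad = createCudaTensor();
    CUDATensor d_sens = createCudaTensor();
    HE(cudaMemcpy(cuda_grad, &d_grad, sizeof(CUDATensor), cudaMemcpyHostToDevice));
    HE(cudaMemcpy(cuda_sens, &d_sens, sizeof(CUDATensor), cudaMemcpyHostToDevice));

The device allocations made inside createCudaTensor() are still referenced through the uploaded structs, so nothing extra needs freeing at this point.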
787e63c9abf71e8d210f418eb6536fd1a98f37ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgeelltmv.cu normal z -> s, Fri Jul 18 17:34:27 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float *d_val, magma_index_t *d_colind, float *d_x, float beta, float *d_y) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; float val = d_val [ num_rows * n + row ]; if( val != 0) dot += val * d_x[col ]; } d_y[ row ] = dot * alpha + beta * d_y [ row ]; } } // shifted ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float *d_val, magma_index_t *d_colind, float *d_x, float beta, int offset, int blocksize, magma_index_t *add_rows, float *d_y){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; float val = d_val [ num_rows * n + row ]; if( val != 0) dot += val * d_x[col ]; } if( row<blocksize ) d_y[ row ] = dot * alpha - lambda * d_x[ offset+row ] + beta * d_y [ row ]; else d_y[ row ] = dot * alpha - lambda * d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param nnz_per_row magma_int_t number of elements in the longest row @param alpha float scalar multiplier @param d_val float* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x float* input vector x @param beta float scalar multiplier @param d_y float* input/output vector y @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float *d_val, magma_index_t *d_colind, float *d_x, float beta, float *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); hipLaunchKernelGGL(( sgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream , m, n, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param nnz_per_row magma_int_t number of elements in the longest row @param alpha float scalar multiplier @param lambda float scalar multiplier @param d_val float* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x float* input vector x @param beta float scalar multiplier @param offset magma_int_t in case not the main diagonal is scaled @param blocksize magma_int_t in case of processing multiple vectors @param add_rows magma_int_t* in case the matrixpowerskernel is used @param d_y float* input/output vector y @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, float *d_val, magma_index_t *d_colind, float *d_x, float beta, int offset, int blocksize, magma_index_t *add_rows, float *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); float tmp_shift; //magma_ssetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; hipLaunchKernelGGL(( sgeelltmv_kernel_shift), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream , m, n, nnz_per_row, alpha, tmp_shift, d_val, d_colind, d_x, beta, offset, blocksize, add_rows, d_y ); return MAGMA_SUCCESS; }
787e63c9abf71e8d210f418eb6536fd1a98f37ec.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @generated from zgeelltmv.cu normal z -> s, Fri Jul 18 17:34:27 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif // ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel( int num_rows, int num_cols, int num_cols_per_row, float alpha, float *d_val, magma_index_t *d_colind, float *d_x, float beta, float *d_y) { int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; float val = d_val [ num_rows * n + row ]; if( val != 0) dot += val * d_x[col ]; } d_y[ row ] = dot * alpha + beta * d_y [ row ]; } } // shifted ELL SpMV kernel //Michael Garland __global__ void sgeelltmv_kernel_shift( int num_rows, int num_cols, int num_cols_per_row, float alpha, float lambda, float *d_val, magma_index_t *d_colind, float *d_x, float beta, int offset, int blocksize, magma_index_t *add_rows, float *d_y){ int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ float dot = MAGMA_S_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = d_colind [ num_rows * n + row ]; float val = d_val [ num_rows * n + row ]; if( val != 0) dot += val * d_x[col ]; } if( row<blocksize ) d_y[ row ] = dot * alpha - lambda * d_x[ offset+row ] + beta * d_y [ row ]; else d_y[ row ] = dot * alpha - lambda * d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ]; } } /** Purpose ------- This routine computes y = alpha * A^t * x + beta * y on the GPU. Input format is ELL. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param nnz_per_row magma_int_t number of elements in the longest row @param alpha float scalar multiplier @param d_val float* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x float* input vector x @param beta float scalar multiplier @param d_y float* input/output vector y @ingroup magmasparse_s ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float *d_val, magma_index_t *d_colind, float *d_x, float beta, float *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); sgeelltmv_kernel<<< grid, BLOCK_SIZE, 0, magma_stream >>> ( m, n, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y ); return MAGMA_SUCCESS; } /** Purpose ------- This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU. Input format is ELL. 
Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows in A @param n magma_int_t number of columns in A @param nnz_per_row magma_int_t number of elements in the longest row @param alpha float scalar multiplier @param lambda float scalar multiplier @param d_val float* array containing values of A in ELL @param d_colind magma_int_t* columnindices of A in ELL @param d_x float* input vector x @param beta float scalar multiplier @param offset magma_int_t in case not the main diagonal is scaled @param blocksize magma_int_t in case of processing multiple vectors @param add_rows magma_int_t* in case the matrixpowerskernel is used @param d_y float* input/output vector y @ingroup magmasparse_sblas ********************************************************************/ extern "C" magma_int_t magma_sgeelltmv_shift( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, float alpha, float lambda, float *d_val, magma_index_t *d_colind, float *d_x, float beta, int offset, int blocksize, magma_index_t *add_rows, float *d_y ){ dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); float tmp_shift; //magma_ssetvector(1,&lambda,1,&tmp_shift,1); tmp_shift = lambda; sgeelltmv_kernel_shift<<< grid, BLOCK_SIZE, 0, magma_stream >>> ( m, n, nnz_per_row, alpha, tmp_shift, d_val, d_colind, d_x, beta, offset, blocksize, add_rows, d_y ); return MAGMA_SUCCESS; }
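The layout that sgeelltmv_kernel expects can be read off its indexing: entry n of row r sits at num_rows*n + r in both d_val and d_colind, i.e. the ELL slots are stored slice by slice and padded with zero values (which the val != 0 test then skips). A small builder illustrating that layout, offered as a sketch rather than MAGMA code, with plain int standing in for magma_index_t:

    // Pack a row-major dense matrix into ELL storage with the column-sliced layout
    // read by sgeelltmv_kernel: val[num_rows*n + r], colind[num_rows*n + r].
    void dense_to_ell( int num_rows, int num_cols, int num_cols_per_row,
                       const float* dense, float* val, int* colind ) {
        for ( int r = 0; r < num_rows; ++r ) {
            int n = 0;
            for ( int c = 0; c < num_cols && n < num_cols_per_row; ++c ) {
                if ( dense[r*num_cols + c] != 0.f ) {
                    val[num_rows*n + r]    = dense[r*num_cols + c];
                    colind[num_rows*n + r] = c;
                    ++n;
                }
            }
            for ( ; n < num_cols_per_row; ++n ) {   // zero padding is ignored by the kernel
                val[num_rows*n + r]    = 0.f;
                colind[num_rows*n + r] = 0;
            }
        }
    }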
829204ae5f56c0301c655fe04a4347e2f4c13a5b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/fvm_types.hpp> #include "gpu_api.hpp" #include "gpu_common.hpp" #include "matrix_common.hpp" #include "matrix_fine.hpp" namespace arb { namespace gpu { namespace kernels { // // gather and scatter kernels // // to[i] = from[p[i]] template <typename T, typename I> __global__ void gather(const T* __restrict__ const from, T* __restrict__ const to, const I* __restrict__ const p, unsigned n) { unsigned i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { to[i] = from[p[i]]; } } // to[p[i]] = from[i] template <typename T, typename I> __global__ void scatter(const T* __restrict__ const from, T* __restrict__ const to, const I* __restrict__ const p, unsigned n) { unsigned i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { to[p[i]] = from[i]; } } /// GPU implementation of Hines matrix assembly. /// Fine layout. /// For a given time step size dt: /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve. template <typename T, typename I> __global__ void assemble_matrix_fine( T* __restrict__ const d, T* __restrict__ const rhs, const T* __restrict__ const invariant_d, const T* __restrict__ const voltage, const T* __restrict__ const current, const T* __restrict__ const conductivity, const T* __restrict__ const cv_capacitance, const T* __restrict__ const area, const I* __restrict__ const cv_to_intdom, const T* __restrict__ const dt_intdom, const I* __restrict__ const perm, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < n) { // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units S (micro-Siemens). // See the model documentation in docs/model for more information. const auto dt = dt_intdom[cv_to_intdom[tid]]; const auto p = dt > 0; const auto pid = perm[tid]; const auto area_factor = T(1e-3)*area[tid]; const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid]; const auto r_d = gi + invariant_d[tid]; const auto r_rhs = gi*voltage[tid] - area_factor*current[tid]; d[pid] = p ? r_d : 0; rhs[pid] = p ? r_rhs : voltage[tid]; } } /// GPU implementation of Hines Matrix solver. /// Fine-grained tree based solver. /// Each block solves a set of matricesb iterating over the levels of matrix /// and perfoming a backward and forward substitution. On each level one thread /// gets assigned to one branch on this level of a matrix and solves and /// performs the substitution. Afterwards all threads continue on the next /// level. /// To avoid idle threads, one should try that on each level, there is a similar /// number of branches. 
template <typename T> __global__ void solve_matrix_fine( T* __restrict__ const rhs, T* __restrict__ const d, const T* __restrict__ const u, const level_metadata* __restrict__ const level_meta, const fvm_index_type* __restrict__ const level_lengths, const fvm_index_type* __restrict__ const level_parents, const fvm_index_type* __restrict__ const block_index, const fvm_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells { const auto tid = threadIdx.x; const auto bid = blockIdx.x; const auto first_level = block_index[bid]; const auto num_levels = block_index[bid + 1] - first_level; const auto block_level_meta = &level_meta[first_level]; // backward substitution for (unsigned l=0; l<num_levels-1; ++l) { // Metadata for this level and the next level const auto& lvl_meta = block_level_meta[l]; const auto& next_lvl_meta = block_level_meta[l+1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; // Perform backward substitution for each branch on this level. // One thread per branch. if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + tid; // Zero diagonal term implies dt==0; just leave rhs (for whole matrix) // alone in that case. // Each cell has a different `dt`, because we choose time step size // according to when the next event is arriving at a cell. So, some // cells require more time steps than others, but we have to solve // all the matrices at the same time. When a cell finishes, we put a // `0` on the diagonal to mark that it should not be solved for. if (d[pos]!=0) { // each branch perform substitution for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const auto d_next = d[next_pos]; const auto rhs_next = rhs[next_pos]; const T factor = -u[pos]/d[pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } // Update d and rhs at the parent node of this branch. // A parent may have more than one contributing to it, so we use // atomic updates to avoid races conditions. 
const unsigned parent_index = next_lvl_meta.matrix_data_index; const unsigned p = parent_index + lvl_parents[tid]; const T factor = -u[pos] / d[pos]; gpu_atomic_add(d + p, factor*u[pos]); gpu_atomic_add(rhs + p, factor*rhs[pos]); } } __syncthreads(); } // Solve the root { // The levels are sorted such that the root is the last level const auto& last_lvl_meta = block_level_meta[num_levels-1]; const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index; const unsigned width = num_matrix[bid]; if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = last_lvl_meta.matrix_data_index + tid; if (d[pos]!=0) { // backward for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const T factor = -u[pos] / d[pos]; const auto rhs_next = rhs[next_pos]; const auto d_next = d[next_pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } auto rhsp = rhs[pos] / d[pos]; rhs[pos] = rhsp; pos -= width; // forward for (unsigned i=0; i<len-1; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } // forward substitution // take great care with loop limits decrementing unsigned counter l for (unsigned l=num_levels-1; l>0; --l) { const auto& lvl_meta = block_level_meta[l-1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; const unsigned parent_index = block_level_meta[l].matrix_data_index; __syncthreads(); // Perform forward-substitution for each branch on this level. // One thread per branch. if (tid < width) { // Find the index of the first node in this branch. const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid; if (d[pos]!=0) { // Load the rhs value for the parent node of this branch. 
const unsigned p = parent_index + lvl_parents[tid]; T rhsp = rhs[p]; // each branch perform substitution for (unsigned i=0; i<len; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } } } // namespace kernels void gather( const fvm_value_type* from, fvm_value_type* to, const fvm_index_type* p, unsigned n) { constexpr unsigned blockdim = 128; const unsigned griddim = impl::block_count(n, blockdim); hipLaunchKernelGGL(( kernels::gather), dim3(griddim), dim3(blockdim), 0, 0, from, to, p, n); } void scatter( const fvm_value_type* from, fvm_value_type* to, const fvm_index_type* p, unsigned n) { constexpr unsigned blockdim = 128; const unsigned griddim = impl::block_count(n, blockdim); hipLaunchKernelGGL(( kernels::scatter), dim3(griddim), dim3(blockdim), 0, 0, from, to, p, n); } void assemble_matrix_fine( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* conductivity, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_index_type* cv_to_intdom, const fvm_value_type* dt_intdom, const fvm_index_type* perm, unsigned n) { const unsigned block_dim = 128; const unsigned num_blocks = impl::block_count(n, block_dim); hipLaunchKernelGGL(( kernels::assemble_matrix_fine), dim3(num_blocks), dim3(block_dim), 0, 0, d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area, cv_to_intdom, dt_intdom, perm, n); } // Example: // // block 0 block 1 block 2 // .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~ ~ ~ // // L0 \ / L5 \ / // \/ \/ // L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . . // \ / \ / \ / \|/ \ / \ / // L2 | | L4 | | | L7 | // | | | | | | // // levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ] // block_index = [0, 3, 5, 8, ...] // num_levels = [3, 2, 3, ...] // num_cells = [2, 3, ...] // num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size() void solve_matrix_fine( fvm_value_type* rhs, fvm_value_type* d, // diagonal values const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD) const level_metadata* level_meta, // information pertaining to each level const fvm_index_type* level_lengths, // lengths of branches of every level concatenated const fvm_index_type* level_parents, // parents of branches of every level concatenated const fvm_index_type* block_index, // start index into levels for each gpu block fvm_index_type* num_cells, // the number of cells packed into this single matrix fvm_index_type* padded_size, // length of rhs, d, u, including padding unsigned num_blocks, // number of blocks unsigned blocksize) // size of each block { hipLaunchKernelGGL(( kernels::solve_matrix_fine), dim3(num_blocks), dim3(blocksize), 0, 0, rhs, d, u, level_meta, level_lengths, level_parents, block_index, num_cells); } } // namespace gpu } // namespace arb
829204ae5f56c0301c655fe04a4347e2f4c13a5b.cu
#include <arbor/fvm_types.hpp> #include "gpu_api.hpp" #include "gpu_common.hpp" #include "matrix_common.hpp" #include "matrix_fine.hpp" namespace arb { namespace gpu { namespace kernels { // // gather and scatter kernels // // to[i] = from[p[i]] template <typename T, typename I> __global__ void gather(const T* __restrict__ const from, T* __restrict__ const to, const I* __restrict__ const p, unsigned n) { unsigned i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { to[i] = from[p[i]]; } } // to[p[i]] = from[i] template <typename T, typename I> __global__ void scatter(const T* __restrict__ const from, T* __restrict__ const to, const I* __restrict__ const p, unsigned n) { unsigned i = threadIdx.x + blockDim.x*blockIdx.x; if (i<n) { to[p[i]] = from[i]; } } /// GPU implementation of Hines matrix assembly. /// Fine layout. /// For a given time step size dt: /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve. template <typename T, typename I> __global__ void assemble_matrix_fine( T* __restrict__ const d, T* __restrict__ const rhs, const T* __restrict__ const invariant_d, const T* __restrict__ const voltage, const T* __restrict__ const current, const T* __restrict__ const conductivity, const T* __restrict__ const cv_capacitance, const T* __restrict__ const area, const I* __restrict__ const cv_to_intdom, const T* __restrict__ const dt_intdom, const I* __restrict__ const perm, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid < n) { // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units μS (micro-Siemens). // See the model documentation in docs/model for more information. const auto dt = dt_intdom[cv_to_intdom[tid]]; const auto p = dt > 0; const auto pid = perm[tid]; const auto area_factor = T(1e-3)*area[tid]; const auto gi = T(1e-3)*cv_capacitance[tid]/dt + area_factor*conductivity[tid]; const auto r_d = gi + invariant_d[tid]; const auto r_rhs = gi*voltage[tid] - area_factor*current[tid]; d[pid] = p ? r_d : 0; rhs[pid] = p ? r_rhs : voltage[tid]; } } /// GPU implementation of Hines Matrix solver. /// Fine-grained tree based solver. /// Each block solves a set of matricesb iterating over the levels of matrix /// and perfoming a backward and forward substitution. On each level one thread /// gets assigned to one branch on this level of a matrix and solves and /// performs the substitution. Afterwards all threads continue on the next /// level. /// To avoid idle threads, one should try that on each level, there is a similar /// number of branches. 
template <typename T> __global__ void solve_matrix_fine( T* __restrict__ const rhs, T* __restrict__ const d, const T* __restrict__ const u, const level_metadata* __restrict__ const level_meta, const fvm_index_type* __restrict__ const level_lengths, const fvm_index_type* __restrict__ const level_parents, const fvm_index_type* __restrict__ const block_index, const fvm_index_type* __restrict__ const num_matrix) // number of packed matrices = number of cells { const auto tid = threadIdx.x; const auto bid = blockIdx.x; const auto first_level = block_index[bid]; const auto num_levels = block_index[bid + 1] - first_level; const auto block_level_meta = &level_meta[first_level]; // backward substitution for (unsigned l=0; l<num_levels-1; ++l) { // Metadata for this level and the next level const auto& lvl_meta = block_level_meta[l]; const auto& next_lvl_meta = block_level_meta[l+1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; // Perform backward substitution for each branch on this level. // One thread per branch. if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + tid; // Zero diagonal term implies dt==0; just leave rhs (for whole matrix) // alone in that case. // Each cell has a different `dt`, because we choose time step size // according to when the next event is arriving at a cell. So, some // cells require more time steps than others, but we have to solve // all the matrices at the same time. When a cell finishes, we put a // `0` on the diagonal to mark that it should not be solved for. if (d[pos]!=0) { // each branch perform substitution for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const auto d_next = d[next_pos]; const auto rhs_next = rhs[next_pos]; const T factor = -u[pos]/d[pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } // Update d and rhs at the parent node of this branch. // A parent may have more than one contributing to it, so we use // atomic updates to avoid races conditions. 
const unsigned parent_index = next_lvl_meta.matrix_data_index; const unsigned p = parent_index + lvl_parents[tid]; const T factor = -u[pos] / d[pos]; gpu_atomic_add(d + p, factor*u[pos]); gpu_atomic_add(rhs + p, factor*rhs[pos]); } } __syncthreads(); } // Solve the root { // The levels are sorted such that the root is the last level const auto& last_lvl_meta = block_level_meta[num_levels-1]; const auto lvl_lengths = level_lengths + last_lvl_meta.level_data_index; const unsigned width = num_matrix[bid]; if (tid < width) { const unsigned len = lvl_lengths[tid]; unsigned pos = last_lvl_meta.matrix_data_index + tid; if (d[pos]!=0) { // backward for (unsigned i=0; i<len-1; ++i) { const unsigned next_pos = pos + width; const T factor = -u[pos] / d[pos]; const auto rhs_next = rhs[next_pos]; const auto d_next = d[next_pos]; d[next_pos] = fma(factor, u[pos], d_next); rhs[next_pos] = fma(factor, rhs[pos], rhs_next); pos = next_pos; } auto rhsp = rhs[pos] / d[pos]; rhs[pos] = rhsp; pos -= width; // forward for (unsigned i=0; i<len-1; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } // forward substitution // take great care with loop limits decrementing unsigned counter l for (unsigned l=num_levels-1; l>0; --l) { const auto& lvl_meta = block_level_meta[l-1]; // Addresses of the first elements of level_lengths and level_parents // that belong to this level const auto lvl_lengths = level_lengths + lvl_meta.level_data_index; const auto lvl_parents = level_parents + lvl_meta.level_data_index; const unsigned width = lvl_meta.num_branches; const unsigned parent_index = block_level_meta[l].matrix_data_index; __syncthreads(); // Perform forward-substitution for each branch on this level. // One thread per branch. if (tid < width) { // Find the index of the first node in this branch. const unsigned len = lvl_lengths[tid]; unsigned pos = lvl_meta.matrix_data_index + (len-1)*width + tid; if (d[pos]!=0) { // Load the rhs value for the parent node of this branch. const unsigned p = parent_index + lvl_parents[tid]; T rhsp = rhs[p]; // each branch perform substitution for (unsigned i=0; i<len; ++i) { rhsp = rhs[pos] - u[pos]*rhsp; rhsp /= d[pos]; rhs[pos] = rhsp; pos -= width; } } } } } } // namespace kernels void gather( const fvm_value_type* from, fvm_value_type* to, const fvm_index_type* p, unsigned n) { constexpr unsigned blockdim = 128; const unsigned griddim = impl::block_count(n, blockdim); kernels::gather<<<griddim, blockdim>>>(from, to, p, n); } void scatter( const fvm_value_type* from, fvm_value_type* to, const fvm_index_type* p, unsigned n) { constexpr unsigned blockdim = 128; const unsigned griddim = impl::block_count(n, blockdim); kernels::scatter<<<griddim, blockdim>>>(from, to, p, n); } void assemble_matrix_fine( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* conductivity, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_index_type* cv_to_intdom, const fvm_value_type* dt_intdom, const fvm_index_type* perm, unsigned n) { const unsigned block_dim = 128; const unsigned num_blocks = impl::block_count(n, block_dim); kernels::assemble_matrix_fine<<<num_blocks, block_dim>>>( d, rhs, invariant_d, voltage, current, conductivity, cv_capacitance, area, cv_to_intdom, dt_intdom, perm, n); } // Example: // // block 0 block 1 block 2 // .~~~~~~~~~~~~~~~~~~. .~~~~~~~~~~~~~~~~~~~~~~~~. 
.~~~~~~~~~~~ ~ ~ // // L0 \ / L5 \ / // \/ \/ // L1 \ / \ / L3 \ / \ | / \ / L6 \ / . . . // \ / \ / \ / \|/ \ / \ / // L2 | | L4 | | | L7 | // | | | | | | // // levels = [L0, L1, L2, L3, L4, L5, L6, L7, ... ] // block_index = [0, 3, 5, 8, ...] // num_levels = [3, 2, 3, ...] // num_cells = [2, 3, ...] // num_blocks = level_start.size() - 1 = num_levels.size() = num_cells.size() void solve_matrix_fine( fvm_value_type* rhs, fvm_value_type* d, // diagonal values const fvm_value_type* u, // upper diagonal (and lower diagonal as the matrix is SPD) const level_metadata* level_meta, // information pertaining to each level const fvm_index_type* level_lengths, // lengths of branches of every level concatenated const fvm_index_type* level_parents, // parents of branches of every level concatenated const fvm_index_type* block_index, // start index into levels for each gpu block fvm_index_type* num_cells, // the number of cells packed into this single matrix fvm_index_type* padded_size, // length of rhs, d, u, including padding unsigned num_blocks, // number of blocks unsigned blocksize) // size of each block { kernels::solve_matrix_fine<<<num_blocks, blocksize>>>( rhs, d, u, level_meta, level_lengths, level_parents, block_index, num_cells); } } // namespace gpu } // namespace arb
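The per-branch elimination in solve_matrix_fine above is easier to verify in isolation. The following is a small host-side sketch, not taken from Arbor, that applies the same backward and forward substitution to a single unbranched chain: d is the diagonal, u the symmetric off-diagonal coupling node i to its parent i+1, and rhs the right-hand side, mirroring the factor = -u[pos]/d[pos] update and the rhsp = (rhs[pos] - u[pos]*rhsp)/d[pos] back-substitution in the kernel. The function and variable names are illustrative only.

// Hypothetical host-side reference for the per-branch elimination used in
// solve_matrix_fine: node i is coupled to its parent i+1 by u[i], so the
// matrix is symmetric tridiagonal. Assumes d[i] != 0 for all i.
#include <cassert>
#include <cstdio>
#include <vector>

std::vector<double> solve_branch(std::vector<double> d,
                                 const std::vector<double>& u,
                                 std::vector<double> rhs) {
    const size_t n = d.size();
    assert(u.size() == n - 1 && rhs.size() == n);

    // Backward substitution: eliminate each node into its parent,
    // exactly like the factor = -u[pos]/d[pos] updates in the kernel.
    for (size_t i = 0; i + 1 < n; ++i) {
        const double factor = -u[i] / d[i];
        d[i + 1]   += factor * u[i];
        rhs[i + 1] += factor * rhs[i];
    }

    // Forward substitution from the root (last node) back to the tip,
    // mirroring rhsp = (rhs[pos] - u[pos]*rhsp) / d[pos].
    std::vector<double> x(n);
    x[n - 1] = rhs[n - 1] / d[n - 1];
    for (size_t i = n - 1; i-- > 0; ) {
        x[i] = (rhs[i] - u[i] * x[i + 1]) / d[i];
    }
    return x;
}

int main() {
    // 2x2 system: [2 1; 1 3] * x = [3; 5]  =>  x = [0.8, 1.4]
    auto x = solve_branch({2.0, 3.0}, {1.0}, {3.0, 5.0});
    std::printf("x = [%g, %g]\n", x[0], x[1]);
    return 0;
}

On the device the same arithmetic runs with one thread per branch, and atomic adds combine sibling contributions at branch points; the serial version covers only a single chain, which is enough to check the recurrences.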
88b235bfa6073bfdd4fbdf880b7c3917892e740b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" //chen #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
88b235bfa6073bfdd4fbdf880b7c3917892e740b.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" //chen #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
fc4bef9b4eb61585852ab1f6c37d22f3005ab1b8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> // for rand(), malloc(), free() #include <io.h> // for open(), write(), close() in WIN32 #include <fcntl.h> // for open(), write() #include <sys/stat.h> #include <windows.h> // for high-resolution performance counter #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ hipError_t e = hipGetLastError(); \ if (hipSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ hipGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif #define GRIDSIZE (8 * 1024) #define BLOCKSIZE 1024 #define TOTALSIZE (GRIDSIZE * BLOCKSIZE) // we need 256M byte Video RAM void genData(float* ptr, unsigned int size) { while (size--) { *ptr++ = (float)(rand() % 1000) / 1000.0F; } } void getDiff(float* dst, const float* src, unsigned int size) { for (register int i = 1; i < size; ++i) { dst[i] = src[i] - src[i - 1]; } } void writeData(char* filename, const float* src, unsigned int size) { int fd = open(filename, O_WRONLY | O_BINARY | O_CREAT, S_IREAD | S_IWRITE); write(fd, src, size * sizeof(float)); close(fd); printf("data written to \"%s\"\n", filename); } __global__ void adj_diff_shared(float* result, float* input) { extern __shared__ float s_data[]; register unsigned int tx = threadIdx.x; register unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; s_data[tx] = input[i]; __syncthreads(); if (tx > 0) { result[i] = s_data[tx] - s_data[tx - 1]; } else if (i > 0) { result[i] = s_data[tx] - input[i - 1]; } } int main(void) { float* pSource = NULL; float* pResult = NULL; int i; long long cntStart, cntEnd, freq; QueryPerformanceFrequency((LARGE_INTEGER*)(&freq)); // malloc memories on the host-side pSource = (float*)malloc(TOTALSIZE * sizeof(float)); pResult = (float*)malloc(TOTALSIZE * sizeof(float)); // generate source data genData(pSource, TOTALSIZE); // CUDA: allocate device memory float* pSourceDev = NULL; float* pResultDev = NULL; CUDA_CHECK( hipMalloc((void**)&pSourceDev, TOTALSIZE * sizeof(float)) ); CUDA_CHECK( hipMalloc((void**)&pResultDev, TOTALSIZE * sizeof(float)) ); // CUDA: copy from host to device CUDA_CHECK( hipMemcpy(pSourceDev, pSource, TOTALSIZE * sizeof(float), hipMemcpyHostToDevice) ); // start timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart)); // start the stop watch // CUDA: launch the kernel: result[i] = input[i] - input[i-1] dim3 dimGrid(GRIDSIZE, 1, 1); dim3 dimBlock(BLOCKSIZE, 1, 1); hipLaunchKernelGGL(( adj_diff_shared) , dim3(dimGrid), dim3(dimBlock), BLOCKSIZE* sizeof(float), 0, pResultDev, pSourceDev); // end timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd)); // end the stop watch printf("elapsed time = %f usec\n", (double)(cntEnd - cntStart) * 1000000.0 / (double)(freq)); // CUDA: copy from device to host CUDA_CHECK( hipMemcpy(pResult, pResultDev, TOTALSIZE * sizeof(float), hipMemcpyDeviceToHost) ); // write the result on the disk // writeData("host.out", pResult, TOTALSIZE); // print sample cases i = 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE - 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE / 2; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); // CUDA: free the memory CUDA_CHECK( hipFree(pSourceDev) ); CUDA_CHECK( hipFree(pResultDev) ); // free the memory free(pSource); free(pResult); }
fc4bef9b4eb61585852ab1f6c37d22f3005ab1b8.cu
#include <stdio.h> #include <stdlib.h> // for rand(), malloc(), free() #include <io.h> // for open(), write(), close() in WIN32 #include <fcntl.h> // for open(), write() #include <sys/stat.h> #include <windows.h> // for high-resolution performance counter #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ cudaError_t e = cudaGetLastError(); \ if (cudaSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ cudaGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif #define GRIDSIZE (8 * 1024) #define BLOCKSIZE 1024 #define TOTALSIZE (GRIDSIZE * BLOCKSIZE) // we need 256M byte Video RAM void genData(float* ptr, unsigned int size) { while (size--) { *ptr++ = (float)(rand() % 1000) / 1000.0F; } } void getDiff(float* dst, const float* src, unsigned int size) { for (register int i = 1; i < size; ++i) { dst[i] = src[i] - src[i - 1]; } } void writeData(char* filename, const float* src, unsigned int size) { int fd = open(filename, O_WRONLY | O_BINARY | O_CREAT, S_IREAD | S_IWRITE); write(fd, src, size * sizeof(float)); close(fd); printf("data written to \"%s\"\n", filename); } __global__ void adj_diff_shared(float* result, float* input) { extern __shared__ float s_data[]; register unsigned int tx = threadIdx.x; register unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; s_data[tx] = input[i]; __syncthreads(); if (tx > 0) { result[i] = s_data[tx] - s_data[tx - 1]; } else if (i > 0) { result[i] = s_data[tx] - input[i - 1]; } } int main(void) { float* pSource = NULL; float* pResult = NULL; int i; long long cntStart, cntEnd, freq; QueryPerformanceFrequency((LARGE_INTEGER*)(&freq)); // malloc memories on the host-side pSource = (float*)malloc(TOTALSIZE * sizeof(float)); pResult = (float*)malloc(TOTALSIZE * sizeof(float)); // generate source data genData(pSource, TOTALSIZE); // CUDA: allocate device memory float* pSourceDev = NULL; float* pResultDev = NULL; CUDA_CHECK( cudaMalloc((void**)&pSourceDev, TOTALSIZE * sizeof(float)) ); CUDA_CHECK( cudaMalloc((void**)&pResultDev, TOTALSIZE * sizeof(float)) ); // CUDA: copy from host to device CUDA_CHECK( cudaMemcpy(pSourceDev, pSource, TOTALSIZE * sizeof(float), cudaMemcpyHostToDevice) ); // start timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntStart)); // start the stop watch // CUDA: launch the kernel: result[i] = input[i] - input[i-1] dim3 dimGrid(GRIDSIZE, 1, 1); dim3 dimBlock(BLOCKSIZE, 1, 1); adj_diff_shared <<< dimGrid, dimBlock, BLOCKSIZE* sizeof(float)>>>(pResultDev, pSourceDev); // end timer QueryPerformanceCounter((LARGE_INTEGER*)(&cntEnd)); // end the stop watch printf("elapsed time = %f usec\n", (double)(cntEnd - cntStart) * 1000000.0 / (double)(freq)); // CUDA: copy from device to host CUDA_CHECK( cudaMemcpy(pResult, pResultDev, TOTALSIZE * sizeof(float), cudaMemcpyDeviceToHost) ); // write the result on the disk // writeData("host.out", pResult, TOTALSIZE); // print sample cases i = 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE - 1; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); i = TOTALSIZE / 2; printf("i=%2d: %f = %f - %f\n", i, pResult[i], pSource[i], pSource[i - 1]); // CUDA: free the memory CUDA_CHECK( cudaFree(pSourceDev) ); CUDA_CHECK( cudaFree(pResultDev) ); // free the memory free(pSource); free(pResult); }
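The program above defines a host reference, getDiff, but never calls it; a typical follow-up is to validate the kernel output against that reference after the copy back to pResult. The helper below is a hypothetical addition, not part of the original file; it recomputes the adjacent difference on the host and counts mismatches, skipping index 0 because adj_diff_shared never writes result[0].

// Hypothetical verification helper, not part of the original file: checks the
// GPU output against the same adjacent difference computed on the host.
// Index 0 is skipped because neither adj_diff_shared nor getDiff() writes it.
#include <math.h>
#include <stdio.h>

int verify_adj_diff(const float* result, const float* source, unsigned size, float tol) {
    int mismatches = 0;
    for (unsigned idx = 1; idx < size; ++idx) {
        float golden = source[idx] - source[idx - 1];
        if (fabsf(result[idx] - golden) > tol) {
            ++mismatches;
        }
    }
    return mismatches;
}

// Intended call site, after the cudaMemcpy back into pResult in main():
//   printf("mismatches: %d\n", verify_adj_diff(pResult, pSource, TOTALSIZE, 1e-6f));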
699a57d867bb0a1c351530436b412bc8f454899d.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2019 Ole-Christoffer Granmo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This code implements a multiclass version of the Tsetlin Machine from paper arXiv:1804.01508 https://arxiv.org/abs/1804.01508 */ #include <iostream> #include <math.h> #include <stdio.h> #include <assert.h> #include <time.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include "TsetlinMachineConfig.cuh" #include "MultiClassTsetlinMachine.cuh" #include "GPUConfig.cuh" #define NUMBER_OF_TRAINING_EXAMPLES 25000 #define NUMBER_OF_TEST_EXAMPLES 25000 #define EXPERIMENTS 100 #define EPOCHS 200 #define DEVICE 0 int y_train[NUMBER_OF_TRAINING_EXAMPLES], y_test[NUMBER_OF_TEST_EXAMPLES]; int *X_train; int *X_test; void read_file(void) { FILE * fp; char * line = NULL; size_t len = 0; const char *s = " "; char *token = NULL; // Training Dataset for (int i = 0; i < NUMBER_OF_TRAINING_EXAMPLES; i++) { for (int j = 0; j < LA_CHUNKS; j++) { X_train[i*LA_CHUNKS + j] = 0; } } fp = fopen("IMDBTrainingData.txt", "r"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int i = 0; i < NUMBER_OF_TRAINING_EXAMPLES; i++) { getline(&line, &len, fp); token = strtok(line, s); for (int j = 0; j < FEATURES; j++) { if (atoi(token) == 1) { int chunk_nr = j / INT_SIZE; int chunk_pos = j % INT_SIZE; X_train[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } else { int chunk_nr = (j + FEATURES) / INT_SIZE; int chunk_pos = (j + FEATURES) % INT_SIZE; X_train[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } token=strtok(NULL,s); } y_train[i] = atoi(token); } fclose(fp); // Test Dataset for (int i = 0; i < NUMBER_OF_TEST_EXAMPLES; i++) { for (int j = 0; j < LA_CHUNKS; j++) { X_test[i*LA_CHUNKS + j] = 0; } } fp = fopen("IMDBTestData.txt", "r"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int i = 0; i < NUMBER_OF_TEST_EXAMPLES; i++) { getline(&line, &len, fp); token = strtok(line, s); for (int j = 0; j < FEATURES; j++) { if (atoi(token) == 1) { int chunk_nr = j / INT_SIZE; int chunk_pos = j % INT_SIZE; X_test[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } else { int chunk_nr = (j + FEATURES) / INT_SIZE; int chunk_pos = (j + FEATURES) % INT_SIZE; X_test[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } token=strtok(NULL,s); } y_test[i] = atoi(token); } fclose(fp); } __global__ void setup_kernel(hiprandState_t *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ 
hiprand_init(1234, id, 0, &state[id]); } int main(void) { FILE *fp; hiprandState_t *devStates; hipSetDevice(DEVICE); int numSMs; hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, DEVICE); printf("Num SMS: %d\n", numSMs); // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&X_train, NUMBER_OF_TRAINING_EXAMPLES * LA_CHUNKS * sizeof(int)); hipMallocManaged(&X_test, NUMBER_OF_TEST_EXAMPLES * LA_CHUNKS * sizeof(int)); read_file(); hipMallocManaged((void **)&devStates, GRID_SIZE * BLOCK_SIZE * sizeof(hiprandState_t)); hipLaunchKernelGGL(( setup_kernel), dim3(GRID_SIZE),dim3(BLOCK_SIZE), 0, 0, devStates); hipDeviceSynchronize(); MultiClassTsetlinMachine<2> mc_tm; fp = fopen("./statistics.txt","w"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int e = 0; e < EXPERIMENTS; ++e) { printf("\nEXPERIMENT %d\n", e+1); mc_tm.initialize(); for (int i = 0; i < EPOCHS; ++i) { printf("\n##### EPOCH %d #####\n", i+1); clock_t start, end; double gpu_time_testing, gpu_time_training; start = clock(); mc_tm.fit(devStates, X_train, y_train, NUMBER_OF_TRAINING_EXAMPLES, S, 1); end = clock(); gpu_time_training = ((double) (end - start)) / CLOCKS_PER_SEC; start = clock(); mc_tm.evaluate(X_test, y_test, NUMBER_OF_TEST_EXAMPLES); end = clock(); gpu_time_testing = ((double) (end - start)) / CLOCKS_PER_SEC; for (int n = 0; n < 2; ++n) { printf("\n-- CLASS %d --\n\n", n+1); float precision = 1.0 * mc_tm.true_positive[n] / (mc_tm.true_positive[n] + mc_tm.false_positive[n]); printf("PRECISION: %.3f\n", precision); float recall = 1.0 * mc_tm.true_positive[n] / (mc_tm.true_positive[n] + mc_tm.false_negative[n]); printf("RECALL: %.3f\n", recall); float fscore = 2 * precision * recall / (precision + recall); printf("F-SCORE: %.3f\n", fscore); fprintf(fp, "%d %d %d %d %d %d %d %.4f %.4f %.4f %f %f\n", e, i, n, mc_tm.true_positive[n], mc_tm.false_positive[n], mc_tm.true_negative[n], mc_tm.false_negative[n], precision, recall, fscore, gpu_time_training, gpu_time_testing); fflush(fp); } printf("\n"); printf("TRAINING TIME: %f\n", gpu_time_training); printf("TESTING TIME: %f\n", gpu_time_testing); } } fclose(fp); delete &mc_tm; hipFree(devStates); hipFree(X_train); hipFree(X_test); return 0; }
699a57d867bb0a1c351530436b412bc8f454899d.cu
/* Copyright (c) 2019 Ole-Christoffer Granmo Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. This code implements a multiclass version of the Tsetlin Machine from paper arXiv:1804.01508 https://arxiv.org/abs/1804.01508 */ #include <iostream> #include <math.h> #include <stdio.h> #include <assert.h> #include <time.h> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> #include "TsetlinMachineConfig.cuh" #include "MultiClassTsetlinMachine.cuh" #include "GPUConfig.cuh" #define NUMBER_OF_TRAINING_EXAMPLES 25000 #define NUMBER_OF_TEST_EXAMPLES 25000 #define EXPERIMENTS 100 #define EPOCHS 200 #define DEVICE 0 int y_train[NUMBER_OF_TRAINING_EXAMPLES], y_test[NUMBER_OF_TEST_EXAMPLES]; int *X_train; int *X_test; void read_file(void) { FILE * fp; char * line = NULL; size_t len = 0; const char *s = " "; char *token = NULL; // Training Dataset for (int i = 0; i < NUMBER_OF_TRAINING_EXAMPLES; i++) { for (int j = 0; j < LA_CHUNKS; j++) { X_train[i*LA_CHUNKS + j] = 0; } } fp = fopen("IMDBTrainingData.txt", "r"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int i = 0; i < NUMBER_OF_TRAINING_EXAMPLES; i++) { getline(&line, &len, fp); token = strtok(line, s); for (int j = 0; j < FEATURES; j++) { if (atoi(token) == 1) { int chunk_nr = j / INT_SIZE; int chunk_pos = j % INT_SIZE; X_train[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } else { int chunk_nr = (j + FEATURES) / INT_SIZE; int chunk_pos = (j + FEATURES) % INT_SIZE; X_train[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } token=strtok(NULL,s); } y_train[i] = atoi(token); } fclose(fp); // Test Dataset for (int i = 0; i < NUMBER_OF_TEST_EXAMPLES; i++) { for (int j = 0; j < LA_CHUNKS; j++) { X_test[i*LA_CHUNKS + j] = 0; } } fp = fopen("IMDBTestData.txt", "r"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int i = 0; i < NUMBER_OF_TEST_EXAMPLES; i++) { getline(&line, &len, fp); token = strtok(line, s); for (int j = 0; j < FEATURES; j++) { if (atoi(token) == 1) { int chunk_nr = j / INT_SIZE; int chunk_pos = j % INT_SIZE; X_test[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } else { int chunk_nr = (j + FEATURES) / INT_SIZE; int chunk_pos = (j + FEATURES) % INT_SIZE; X_test[i*LA_CHUNKS + chunk_nr] |= (1 << chunk_pos); } token=strtok(NULL,s); } y_test[i] = atoi(token); } fclose(fp); } __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x * blockDim.x; /* Each thread gets same seed, a different sequence number, no offset */ curand_init(1234, id, 0, &state[id]); } int main(void) { FILE *fp; curandState *devStates; 
cudaSetDevice(DEVICE); int numSMs; cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, DEVICE); printf("Num SMS: %d\n", numSMs); // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&X_train, NUMBER_OF_TRAINING_EXAMPLES * LA_CHUNKS * sizeof(int)); cudaMallocManaged(&X_test, NUMBER_OF_TEST_EXAMPLES * LA_CHUNKS * sizeof(int)); read_file(); cudaMallocManaged((void **)&devStates, GRID_SIZE * BLOCK_SIZE * sizeof(curandState)); setup_kernel<<<GRID_SIZE,BLOCK_SIZE>>>(devStates); cudaDeviceSynchronize(); MultiClassTsetlinMachine<2> mc_tm; fp = fopen("./statistics.txt","w"); if (fp == NULL) { printf("Error opening\n"); exit(EXIT_FAILURE); } for (int e = 0; e < EXPERIMENTS; ++e) { printf("\nEXPERIMENT %d\n", e+1); mc_tm.initialize(); for (int i = 0; i < EPOCHS; ++i) { printf("\n##### EPOCH %d #####\n", i+1); clock_t start, end; double gpu_time_testing, gpu_time_training; start = clock(); mc_tm.fit(devStates, X_train, y_train, NUMBER_OF_TRAINING_EXAMPLES, S, 1); end = clock(); gpu_time_training = ((double) (end - start)) / CLOCKS_PER_SEC; start = clock(); mc_tm.evaluate(X_test, y_test, NUMBER_OF_TEST_EXAMPLES); end = clock(); gpu_time_testing = ((double) (end - start)) / CLOCKS_PER_SEC; for (int n = 0; n < 2; ++n) { printf("\n-- CLASS %d --\n\n", n+1); float precision = 1.0 * mc_tm.true_positive[n] / (mc_tm.true_positive[n] + mc_tm.false_positive[n]); printf("PRECISION: %.3f\n", precision); float recall = 1.0 * mc_tm.true_positive[n] / (mc_tm.true_positive[n] + mc_tm.false_negative[n]); printf("RECALL: %.3f\n", recall); float fscore = 2 * precision * recall / (precision + recall); printf("F-SCORE: %.3f\n", fscore); fprintf(fp, "%d %d %d %d %d %d %d %.4f %.4f %.4f %f %f\n", e, i, n, mc_tm.true_positive[n], mc_tm.false_positive[n], mc_tm.true_negative[n], mc_tm.false_negative[n], precision, recall, fscore, gpu_time_training, gpu_time_testing); fflush(fp); } printf("\n"); printf("TRAINING TIME: %f\n", gpu_time_training); printf("TESTING TIME: %f\n", gpu_time_testing); } } fclose(fp); delete &mc_tm; cudaFree(devStates); cudaFree(X_train); cudaFree(X_test); return 0; }
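setup_kernel above initializes one curandState per thread with a shared seed and a per-thread sequence number, which is the standard way to give every thread an independent random stream. The sketch below is a hypothetical, stand-alone illustration of how such states are usually consumed afterwards: copy the state into a register, draw from it, and write the advanced state back so a later kernel continues the same sequence. The kernel and variable names are made up for the example; only the cuRAND calls themselves are the real API.

// Minimal sketch (not from the file above) of consuming per-thread curandState.
#include <cstdio>
#include <curand_kernel.h>

__global__ void init_states(curandState* states, unsigned long long seed) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, id, 0, &states[id]);   // same seed, per-thread sequence, no offset
}

__global__ void draw_uniform(curandState* states, float* out, int n) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if (id < n) {
        curandState local = states[id];      // work on a register copy
        out[id] = curand_uniform(&local);    // uniform in (0, 1]
        states[id] = local;                  // persist the advanced state
    }
}

int main() {
    const int n = 256;
    curandState* states;
    float* out;
    cudaMalloc((void**)&states, n * sizeof(curandState));
    cudaMalloc((void**)&out, n * sizeof(float));
    init_states<<<1, n>>>(states, 1234ULL);
    draw_uniform<<<1, n>>>(states, out, n);
    float h[4];
    cudaMemcpy(h, out, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("%f %f %f %f\n", h[0], h[1], h[2], h[3]);
    cudaFree(states);
    cudaFree(out);
    return 0;
}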
4c979488bbccd6d408ad0b9c1e08860737b29420.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "cuda_utils.h" #include "distance/distance.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename Type> __global__ void naiveDistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k, DistanceType type) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; Type acc = Type(0); for (int i = 0; i < k; ++i) { auto diff = x[i + midx * k] - y[i + nidx * k]; acc += diff * diff; } if (type == EucExpandedL2Sqrt || type == EucUnexpandedL2Sqrt) acc = mySqrt(acc); dist[midx * n + nidx] = acc; } template <typename Type> __global__ void naiveL1DistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } Type acc = Type(0); for (int i = 0; i < k; ++i) { auto a = x[i + midx * k]; auto b = y[i + nidx * k]; auto diff = (a > b) ? (a - b) : (b - a); acc += diff; } dist[midx * n + nidx] = acc; } template <typename Type> __global__ void naiveCosineDistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } Type acc_a = Type(0); Type acc_b = Type(0); Type acc_ab = Type(0); for (int i = 0; i < k; ++i) { auto a = x[i + midx * k]; auto b = y[i + nidx * k]; acc_a += a * a; acc_b += b * b; acc_ab += a * b; } dist[midx * n + nidx] = acc_ab / (mySqrt(acc_a) * mySqrt(acc_b)); } template <typename Type> void naiveDistance(Type *dist, const Type *x, const Type *y, int m, int n, int k, DistanceType type) { static const dim3 TPB(16, 32, 1); dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1); switch (type) { case EucUnexpandedL1: hipLaunchKernelGGL(( naiveL1DistanceKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k); break; case EucUnexpandedL2Sqrt: case EucUnexpandedL2: case EucExpandedL2Sqrt: case EucExpandedL2: hipLaunchKernelGGL(( naiveDistanceKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k, type); break; case EucExpandedCosine: hipLaunchKernelGGL(( naiveCosineDistanceKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k); break; default: FAIL() << "should be here\n"; } CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct DistanceInputs { T tolerance; int m, n, k; DistanceType type; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const DistanceInputs<T> &dims) { return os; } template <typename T, typename OutputTile_t> void distanceLauncher(T* x, T* y, T* dist, T* dist2, int m, int n, int k, DistanceInputs<T>& params, T threshold, char* 
workspace, size_t worksize) { auto fin_op = [dist2, threshold] __device__(T d_val, int g_d_idx) { dist2[g_d_idx] = (d_val < threshold) ? 0.f : d_val; return d_val; }; distance<T, T, T, OutputTile_t>( x, y, dist, m, n, k, params.type, workspace, worksize, fin_op); } template <typename T> class DistanceTest : public ::testing::TestWithParam<DistanceInputs<T>> { public: void SetUp() override { params = ::testing::TestWithParam<DistanceInputs<T>>::GetParam(); Random::Rng r(params.seed); int m = params.m; int n = params.n; int k = params.k; allocate(x, m * k); allocate(y, n * k); allocate(dist_ref, m * n); allocate(dist, m * n); allocate(dist2, m * n); r.uniform(x, m * k, T(-1.0), T(1.0)); r.uniform(y, n * k, T(-1.0), T(1.0)); naiveDistance(dist_ref, x, y, m, n, k, params.type); char *workspace = nullptr; size_t worksize = 0; typedef cutlass::Shape<8, 128, 128> OutputTile_t; if (params.type <= EucExpandedCosine) { distance<T, T, T, OutputTile_t>( x, y, dist, m, n, k, params.type, nullptr, worksize); if (worksize != 0) { allocate(workspace, worksize); } } T threshold = -10000.f; distanceLauncher<T,OutputTile_t>(x, y, dist, dist2, m, n, k, params, threshold, workspace, worksize); if (params.type <= EucExpandedCosine) { CUDA_CHECK(hipFree(workspace)); } } void TearDown() override { CUDA_CHECK(hipFree(x)); CUDA_CHECK(hipFree(y)); CUDA_CHECK(hipFree(dist_ref)); CUDA_CHECK(hipFree(dist)); CUDA_CHECK(hipFree(dist2)); } protected: DistanceInputs<T> params; T *x, *y, *dist_ref, *dist, *dist2; }; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, EucExpandedL2, 1234ULL}, // accumulate issue due to x^2 + y^2 -2xy {0.001f, 1024, 32, 1024, EucExpandedL2, 1234ULL}, {0.001f, 32, 1024, 1024, EucExpandedL2, 1234ULL}, {0.003f, 1024, 1024, 1024, EucExpandedL2, 1234ULL}, {0.03f, 1024, 1024, 32, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 1024, 32, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 32, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 1024, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 32, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 32, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 32, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL1, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL1, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL1, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL1, 1234ULL}, }; typedef DistanceTest<float> DistanceTestF; TEST_P(DistanceTestF, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, EucExpandedL2, 1234ULL}, // accumulate issue due to x^2 + y^2 -2xy {0.001, 1024, 32, 1024, EucExpandedL2, 1234ULL}, {0.001, 32, 1024, 1024, EucExpandedL2, 1234ULL}, {0.003, 1024, 1024, 1024, EucExpandedL2, 1234ULL}, {0.03, 1024, 1024, 32, EucExpandedL2Sqrt, 1234ULL}, {0.03, 1024, 32, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03, 
32, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03, 1024, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 32, EucUnexpandedL2, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 1024, 1024, 32, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 32, EucExpandedCosine, 1234ULL}, {0.001, 1024, 32, 1024, EucExpandedCosine, 1234ULL}, {0.001, 32, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001, 1024, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001, 1024, 1024, 32, EucUnexpandedL1, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL1, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL1, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL1, 1234ULL}, }; typedef DistanceTest<double> DistanceTestD; TEST_P(DistanceTestD, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceTestD, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
4c979488bbccd6d408ad0b9c1e08860737b29420.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "cuda_utils.h" #include "distance/distance.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace Distance { template <typename Type> __global__ void naiveDistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k, DistanceType type) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) return; Type acc = Type(0); for (int i = 0; i < k; ++i) { auto diff = x[i + midx * k] - y[i + nidx * k]; acc += diff * diff; } if (type == EucExpandedL2Sqrt || type == EucUnexpandedL2Sqrt) acc = mySqrt(acc); dist[midx * n + nidx] = acc; } template <typename Type> __global__ void naiveL1DistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } Type acc = Type(0); for (int i = 0; i < k; ++i) { auto a = x[i + midx * k]; auto b = y[i + nidx * k]; auto diff = (a > b) ? (a - b) : (b - a); acc += diff; } dist[midx * n + nidx] = acc; } template <typename Type> __global__ void naiveCosineDistanceKernel(Type *dist, const Type *x, const Type *y, int m, int n, int k) { int midx = threadIdx.x + blockIdx.x * blockDim.x; int nidx = threadIdx.y + blockIdx.y * blockDim.y; if (midx >= m || nidx >= n) { return; } Type acc_a = Type(0); Type acc_b = Type(0); Type acc_ab = Type(0); for (int i = 0; i < k; ++i) { auto a = x[i + midx * k]; auto b = y[i + nidx * k]; acc_a += a * a; acc_b += b * b; acc_ab += a * b; } dist[midx * n + nidx] = acc_ab / (mySqrt(acc_a) * mySqrt(acc_b)); } template <typename Type> void naiveDistance(Type *dist, const Type *x, const Type *y, int m, int n, int k, DistanceType type) { static const dim3 TPB(16, 32, 1); dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1); switch (type) { case EucUnexpandedL1: naiveL1DistanceKernel<Type><<<nblks, TPB>>>(dist, x, y, m, n, k); break; case EucUnexpandedL2Sqrt: case EucUnexpandedL2: case EucExpandedL2Sqrt: case EucExpandedL2: naiveDistanceKernel<Type><<<nblks, TPB>>>(dist, x, y, m, n, k, type); break; case EucExpandedCosine: naiveCosineDistanceKernel<Type><<<nblks, TPB>>>(dist, x, y, m, n, k); break; default: FAIL() << "should be here\n"; } CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct DistanceInputs { T tolerance; int m, n, k; DistanceType type; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const DistanceInputs<T> &dims) { return os; } template <typename T, typename OutputTile_t> void distanceLauncher(T* x, T* y, T* dist, T* dist2, int m, int n, int k, DistanceInputs<T>& params, T threshold, char* workspace, size_t worksize) { auto fin_op = [dist2, threshold] __device__(T d_val, int g_d_idx) { dist2[g_d_idx] = (d_val < threshold) ? 
0.f : d_val; return d_val; }; distance<T, T, T, OutputTile_t>( x, y, dist, m, n, k, params.type, workspace, worksize, fin_op); } template <typename T> class DistanceTest : public ::testing::TestWithParam<DistanceInputs<T>> { public: void SetUp() override { params = ::testing::TestWithParam<DistanceInputs<T>>::GetParam(); Random::Rng r(params.seed); int m = params.m; int n = params.n; int k = params.k; allocate(x, m * k); allocate(y, n * k); allocate(dist_ref, m * n); allocate(dist, m * n); allocate(dist2, m * n); r.uniform(x, m * k, T(-1.0), T(1.0)); r.uniform(y, n * k, T(-1.0), T(1.0)); naiveDistance(dist_ref, x, y, m, n, k, params.type); char *workspace = nullptr; size_t worksize = 0; typedef cutlass::Shape<8, 128, 128> OutputTile_t; if (params.type <= EucExpandedCosine) { distance<T, T, T, OutputTile_t>( x, y, dist, m, n, k, params.type, nullptr, worksize); if (worksize != 0) { allocate(workspace, worksize); } } T threshold = -10000.f; distanceLauncher<T,OutputTile_t>(x, y, dist, dist2, m, n, k, params, threshold, workspace, worksize); if (params.type <= EucExpandedCosine) { CUDA_CHECK(cudaFree(workspace)); } } void TearDown() override { CUDA_CHECK(cudaFree(x)); CUDA_CHECK(cudaFree(y)); CUDA_CHECK(cudaFree(dist_ref)); CUDA_CHECK(cudaFree(dist)); CUDA_CHECK(cudaFree(dist2)); } protected: DistanceInputs<T> params; T *x, *y, *dist_ref, *dist, *dist2; }; const std::vector<DistanceInputs<float>> inputsf = { {0.001f, 1024, 1024, 32, EucExpandedL2, 1234ULL}, // accumulate issue due to x^2 + y^2 -2xy {0.001f, 1024, 32, 1024, EucExpandedL2, 1234ULL}, {0.001f, 32, 1024, 1024, EucExpandedL2, 1234ULL}, {0.003f, 1024, 1024, 1024, EucExpandedL2, 1234ULL}, {0.03f, 1024, 1024, 32, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 1024, 32, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 32, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03f, 1024, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001f, 1024, 1024, 32, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 32, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 32, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001f, 1024, 1024, 32, EucUnexpandedL1, 1234ULL}, {0.001f, 1024, 32, 1024, EucUnexpandedL1, 1234ULL}, {0.001f, 32, 1024, 1024, EucUnexpandedL1, 1234ULL}, {0.001f, 1024, 1024, 1024, EucUnexpandedL1, 1234ULL}, }; typedef DistanceTest<float> DistanceTestF; TEST_P(DistanceTestF, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceTestF, ::testing::ValuesIn(inputsf)); const std::vector<DistanceInputs<double>> inputsd = { {0.001, 1024, 1024, 32, EucExpandedL2, 1234ULL}, // accumulate issue due to x^2 + y^2 -2xy {0.001, 1024, 32, 1024, EucExpandedL2, 1234ULL}, {0.001, 32, 1024, 1024, EucExpandedL2, 1234ULL}, {0.003, 1024, 1024, 1024, EucExpandedL2, 1234ULL}, {0.03, 1024, 1024, 32, EucExpandedL2Sqrt, 1234ULL}, {0.03, 1024, 32, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03, 32, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.03, 1024, 1024, 1024, EucExpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 32, 
EucUnexpandedL2, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL2, 1234ULL}, {0.001, 1024, 1024, 32, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL2Sqrt, 1234ULL}, {0.001, 1024, 1024, 32, EucExpandedCosine, 1234ULL}, {0.001, 1024, 32, 1024, EucExpandedCosine, 1234ULL}, {0.001, 32, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001, 1024, 1024, 1024, EucExpandedCosine, 1234ULL}, {0.001, 1024, 1024, 32, EucUnexpandedL1, 1234ULL}, {0.001, 1024, 32, 1024, EucUnexpandedL1, 1234ULL}, {0.001, 32, 1024, 1024, EucUnexpandedL1, 1234ULL}, {0.001, 1024, 1024, 1024, EucUnexpandedL1, 1234ULL}, }; typedef DistanceTest<double> DistanceTestD; TEST_P(DistanceTestD, Result) { ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(DistanceTests, DistanceTestD, ::testing::ValuesIn(inputsd)); } // end namespace Distance } // end namespace MLCommon
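The looser tolerances on the EucExpandedL2 cases, and the "accumulate issue due to x^2 + y^2 -2xy" note, come from the cancellation inherent in the expanded formulation ||x||^2 + ||y||^2 - 2<x,y>. The host-side snippet below is an illustrative sketch, not part of the test, contrasting the two formulations on nearly identical vectors; in float arithmetic the expanded form loses most of the significant digits of the true squared distance.

// Illustrative sketch (not part of the test) contrasting the L2 formulations
// that the DistanceType variants above distinguish.
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> x = {1.000001f, 2.0f, 3.0f};
    std::vector<float> y = {1.000000f, 2.0f, 3.0f};

    float unexpanded = 0.f, xx = 0.f, yy = 0.f, xy = 0.f;
    for (size_t i = 0; i < x.size(); ++i) {
        float d = x[i] - y[i];
        unexpanded += d * d;              // direct sum of squared differences
        xx += x[i] * x[i];
        yy += y[i] * y[i];
        xy += x[i] * y[i];
    }
    float expanded = xx + yy - 2.f * xy;  // cancellation amplifies rounding error

    std::printf("unexpanded L2^2 = %.9g\n", unexpanded);
    std::printf("expanded   L2^2 = %.9g\n", expanded);
    return 0;
}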
1cab6cc9f2c333e2e5c7de4004869561b8ab9b7f.hip
// !!! This is a file automatically generated by hipify!!! #include "cuda_process.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main(int argc, char **argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); dothething(); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. struct hipDeviceProp_t props; hipGetDeviceProperties(&props, 0); std::cout << props.name << std::endl; cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
1cab6cc9f2c333e2e5c7de4004869561b8ab9b7f.cu
#include "cuda_process.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main(int argc, char **argv) { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); dothething(); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. struct cudaDeviceProp props; cudaGetDeviceProperties(&props, 0); std::cout << props.name << std::endl; cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
smooth.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "gputime.h" #include <hip/hip_runtime.h> #define N 256 #define ARRAY_SIZE 4096 using namespace std; //Naive smooth kernel without shared memory __global__ void smooth_naive(float * v, float * v_new) { int myIdx = threadIdx.x * gridDim.x + blockIdx.x; int numThreads = blockDim.x * gridDim.x; int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1; int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1; float myElt = v[myIdx]; float myLeftElt = v[myLeftIdx]; float myRightElt = v[myRightIdx]; v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt; } //Faster kernel using shared memory //upto 2.4x faster on GEFORCE 920m __global__ void smooth(float *arr,float *out) { extern __shared__ float smem[]; int tid = threadIdx.x; int gid = blockIdx.x*blockDim.x + threadIdx.x; int block = blockIdx.x; int mb = gridDim.x; smem[tid + 1] = arr[gid]; if (block == 0) { if (tid == 0) { smem[0] = smem[1]; } } else { if (tid == 0) smem[0] = arr[gid - 1]; } if (block == mb-1){ if (tid == N - 1) { smem[tid + 2] = arr[tid + 1]; } else { smem[tid + 2] = arr[gid + 1]; } } __syncthreads(); out[gid] = smem[tid] * 0.25f + smem[tid+1] * 0.5f + smem[tid + 2] * 0.25f; } __global__ void warmup() { //warmup kernel to launch gpu kernels quickly afterwards int id = threadIdx.x + blockIdx.x*blockDim.x; int x = id * 2; } int main() { float *d_in, *d_out; float h_in[4096],h_out[4096]; for (int i = 0; i < 4096; i++) { h_in[i] = (float)rand() / (float)RAND_MAX;; } hipMalloc((float**)&d_in, 4096 * sizeof(float)); hipMemcpy(d_in, h_in, sizeof(float)*ARRAY_SIZE, hipMemcpyHostToDevice); hipMalloc((float**)&d_out, 4096 * sizeof(float)); warmup << <1, 1024 >> >(); GpuTimer timer,timer2; timer.Start(); smooth_naive << < 16, 256 >> >(d_in, d_out); timer.Stop(); cout << timer.Elapsed() << endl; //hipError_t err = hipGetLastError(); //cout << hipGetErrorString(err) << endl; timer2.Start(); smooth <<< 16, 256, 258 * sizeof(float) >> >(d_in, d_out); timer2.Stop(); cout << timer2.Elapsed() << endl; hipMemcpy(h_out, d_out, sizeof(float)*ARRAY_SIZE, hipMemcpyDeviceToHost); //for (int i = 0; i < 4096; i++) // cout << h_out[i] << " "; hipFree(d_in); hipFree(d_out); hipDeviceReset(); return 0; }
smooth.cu
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "gputime.h"
#include <cuda.h>

#define N 256
#define ARRAY_SIZE 4096

using namespace std;

// Naive smooth kernel without shared memory.
// The transposed index (threadIdx.x * gridDim.x + blockIdx.x) still covers every
// element exactly once, but the resulting global loads are strided.
__global__ void smooth_naive(float *v, float *v_new)
{
    int myIdx = threadIdx.x * gridDim.x + blockIdx.x;
    int numThreads = blockDim.x * gridDim.x;
    int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
    int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;

    float myElt = v[myIdx];
    float myLeftElt = v[myLeftIdx];
    float myRightElt = v[myRightIdx];

    v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}

// Faster kernel using shared memory: each block stages a tile of N elements plus
// a one-element halo on each side. Up to 2.4x faster on a GeForce 920M.
// Assumes blockDim.x == N.
__global__ void smooth(float *arr, float *out)
{
    extern __shared__ float smem[];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    int block = blockIdx.x;
    int mb = gridDim.x;

    // Interior of the tile: one element per thread.
    smem[tid + 1] = arr[gid];

    // Left halo: replicate the first element for block 0, otherwise read the
    // last element of the previous block.
    if (tid == 0) {
        smem[0] = (block == 0) ? smem[1] : arr[gid - 1];
    }

    // Right halo: replicate the last element for the last block, otherwise read
    // the first element of the next block.
    if (tid == N - 1) {
        smem[tid + 2] = (block == mb - 1) ? arr[gid] : arr[gid + 1];
    }

    __syncthreads();

    out[gid] = smem[tid] * 0.25f + smem[tid + 1] * 0.5f + smem[tid + 2] * 0.25f;
}

__global__ void warmup()
{
    // Warmup kernel so subsequent GPU kernels launch quickly.
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int x = id * 2;
    (void)x;
}

int main()
{
    float *d_in, *d_out;
    float h_in[ARRAY_SIZE], h_out[ARRAY_SIZE];

    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = (float)rand() / (float)RAND_MAX;
    }

    cudaMalloc((float**)&d_in, ARRAY_SIZE * sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(float) * ARRAY_SIZE, cudaMemcpyHostToDevice);
    cudaMalloc((float**)&d_out, ARRAY_SIZE * sizeof(float));

    warmup<<<1, 1024>>>();

    GpuTimer timer, timer2;

    timer.Start();
    smooth_naive<<<16, 256>>>(d_in, d_out);
    timer.Stop();
    cout << timer.Elapsed() << endl;

    //cudaError_t err = cudaGetLastError();
    //cout << cudaGetErrorString(err) << endl;

    timer2.Start();
    smooth<<<16, 256, (N + 2) * sizeof(float)>>>(d_in, d_out);
    timer2.Stop();
    cout << timer2.Elapsed() << endl;

    cudaMemcpy(h_out, d_out, sizeof(float) * ARRAY_SIZE, cudaMemcpyDeviceToHost);

    //for (int i = 0; i < 4096; i++)
    //    cout << h_out[i] << " ";

    cudaFree(d_in);
    cudaFree(d_out);
    cudaDeviceReset();

    return 0;
}
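// Editorial sketch: gputime.h is not included in this dump. As an assumption
// about its shape only, a minimal event-based timer with the same
// Start/Stop/Elapsed interface used above could look like the code below.
// GpuTimerSketch is a hypothetical stand-in, not the actual GpuTimer.
#include <cuda_runtime.h>

struct GpuTimerSketch {
    cudaEvent_t start, stop;

    GpuTimerSketch()  { cudaEventCreate(&start); cudaEventCreate(&stop); }
    ~GpuTimerSketch() { cudaEventDestroy(start); cudaEventDestroy(stop); }

    void Start() { cudaEventRecord(start, 0); }
    void Stop()  { cudaEventRecord(stop, 0); }

    // Milliseconds between the two recorded events; blocks until the stop event
    // has completed, so the timed kernel has finished before the reading is taken.
    float Elapsed()
    {
        cudaEventSynchronize(stop);
        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);
        return ms;
    }
};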
bfbaa92bf45d52da5f245bfaeb12a61921195c47.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <rocblas.h> #include <math.h> #include <assert.h> #include "cudamat_conv_kernels.cuh" /* * images: (numImgColors, imgPixels, numImages) * filters: (numFilterColors, filterPixels, numFilters) * targets: (numFilters, numModules, numImages) */ void filterActs(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numFilters = filters->size[0]; int numModules = numModulesX * numModulesX; int numImages = images->size[0]; int imgPixels = images->size[1]/numImgColors; int imgSize = int(sqrt(imgPixels)); int filterModuleMult = conv ? 1 : numModules; assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(imgSize * imgSize == imgPixels); assert(images->size[1] == imgPixels * numImgColors); int numFiltersPerGroup = numFilters / numGroups; int imgStride = images->size[0]; //images.getStride(); // images does not need to be a contiguous matrix int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); assert(filterSize * filterSize == filterPixels); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); /* assert(!images.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); assert(filters.isContiguous()); assert(targets.isContiguous());*/ dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 8)) : dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 4)); dim3 threads(32, 4); bool checkImgBounds = numImages % 128 != 0; //if (scaleTargets == 0) { // targets.resize(numFilters * numModules, numImages); //} else { assert(targets->size[1] == numFilters * numModules); assert(targets->size[0] == numImages); //} if (numImgColors <= 3) { assert(numGroups == 1); // It has to be based on above definitions, but just to be sure. if (scaleTargets == 0) { // don't scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { // WTF is this shit? Why does it set everything to zero? // There has got to be an explanation. 
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 
32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else { // do scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) 
{ if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } else { if (scaleTargets == 0) { // don't scale if 
(checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else { // do scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, 
images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } } getLastCudaError("filterActs: kernel execution failed"); } /* * hidActs: (numFilters, numModules, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgPixels, numImages) */ void imgActs(cudamat* hidActs, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[0]; int numFilters = filters->size[0]; //int numFiltersPerGroup = numFilters / numGroups; int numModules = hidActs->size[1] / numFilters; int filterModuleMult = conv ? 1 : numModules; int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSize * imgSize; int numModulesX = sqrt(numModules); assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs->size[1] == numModules * numFilters); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesX * numModulesX); /* assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans());*/ // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); //assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads(16,16); int colorsPerThread; bool checkCaseBounds; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; int imgsPerThread = 4; assert(numFilterColors % (threads.y * colorsPerThread) == 0); checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); } else if (numFilterColors > 3) { colorsPerThread = numFilterColors % 4 == 0 ? 
4 : 2; blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4)); checkCaseBounds = numImages % (16*8) != 0; } else { blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4)); checkCaseBounds = numImages % (16*8) != 0; } //if (scaleTargets == 0) { // do not scale or use targets matrix // targets.resize(numImgColors*imgPixels, numImages); //} else { assert(targets->size[1] == numImgColors * imgPixels); assert(targets->size[0] == numImages); //} if (conv) { // convolutional units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, 
false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, 
numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, 
numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // local, unshared units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, 
paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if 
(numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 
scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), 
dim3(threads), 0, 0, hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } getLastCudaError("imgActs: kernel execution failed"); } void weightActs(cudamat* images, cudamat* hidActs, cudamat* targets, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum, float scaleTargets, float scaleOutput) { int numFilterColors = numImgColors / numGroups; int imgStride = images->size[0]; int numImages = images->size[0]; int imgPixels = images->size[1] / numImgColors; int imgSize = int(sqrt(imgPixels)); int numModules = numModulesX * numModulesX; int numFilters = hidActs->size[1] / numModules; int numFiltersPerGroup = numFilters / numGroups; assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(imgSize * imgSize == imgPixels); assert(images->size[1] == imgPixels * numImgColors); int filterPixels = filterSize * filterSize; partialSum = partialSum == 0 ? numModules : partialSum; assert(numModules % partialSum == 0); assert(hidActs->size[0] == numImages); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); assert(numModules * numFilters == hidActs->size[1]); /* assert(!images.isTrans()); assert(!hidActs.isTrans()); assert(hidActs.isContiguous()); assert(!targets.isTrans()); assert(targets.isContiguous());*/ int preloadCases = 32; dim3 blocks, threads; int bx, by; int pixelsPerThread, filtersPerThread, colorsPerThread; // Worth playing with these parameters to find best values for your problem. // These values work relatively well, but not optimal for all problems. if (numFilterColors > 3) { filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1; colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4; by = numFiltersPerGroup % 64 == 0 ? 4 : 8; bx = numFiltersPerGroup % 64 == 0 ? 32 : 16; blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread)); } else { assert(numGroups == 1); // Just for sanity pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2); by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best bx = numFilters % 32 == 0 ? 
32 : 16; blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread)); } assert((by * bx) % preloadCases == 0); threads = dim3(bx, by); bool checkCaseBounds = numImages % 32 != 0; //if (scaleTargets == 0) { // targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters); //} else { assert(targets->size[1] == (numModules/partialSum) * numFilterColors*filterPixels); assert(targets->size[0] == numFilters); //} if (numFilterColors > 3) { if (scaleTargets == 0) { // do not scale if (numFiltersPerGroup % 64 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } else if (numFiltersPerGroup % 32 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_mc_mf<8,16,2,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } else { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } } else { if (numFiltersPerGroup % 64 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, 
scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<4,32,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } else if (numFiltersPerGroup % 32 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,2,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } else { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,8,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, 
hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_mc_mf<8,16,1,4,32,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } } } else { // numColors in 1,2,3 if (scaleTargets == 0) { // do not scale if (numFilterColors == 1) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( 
conv_weight_acts_c<8,16,2,32,2,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 3) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,false, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors == 1) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, true>), 
dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,8,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,5,32,1,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,2,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 3) { if (checkCaseBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, 
targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<4,32,5,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( conv_weight_acts_c<8,16,2,32,3,true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } } } getLastCudaError("weightActs: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, outputs, numImages) */ template<class Pooler> void convLocalPool(cudamat* images, cudamat* target, int numFilters, int subsX, int startX, int strideX, int outputsX, Pooler pooler) { int numImages = images->size[0]; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); */ // assert(numFilters % 4 == 0); // assert(numImages % 128 == 0); // int outputs = outputsX * outputsX; //target.resize(numFilters*outputs, numImages); if (strideX == 1 && subsX >= 6) { int imgsPerThread = 8; int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 
2 : 1; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread); if (filtersPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 1, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 1, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 2, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 2, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 3, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 3, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalPool2<Pooler, 8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } } else { bool checkCaseBounds = numImages % 128 != 0; int filtersPerThread = numFilters % 8 == 0 ? 
2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX); if (filtersPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalPool<Pooler, 4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } } getLastCudaError("convLocalPool: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[0]; int numFilters = maxGrads->size[1] / outputs; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads->size[1] == numFilters * outputs); assert(maxGrads->size[0] == numImages); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); */ assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, false, false>), dim3(blocks), dim3(threads), 0, 0, 
images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { hipLaunchKernelGGL(( kLocalMaxUndo<4, 32, 4, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } getLastCudaError("convLocalMaxUndo: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * rnd: (numFilters, imgPixels, numImages) * target: (numFilters, outputs, numImages) */ template<class Pooler> void convLocalProbPool(cudamat* images, cudamat* rnd, cudamat* target, int numFilters, int subsX, int startX, int strideX, int outputsX, Pooler pooler) { int numImages = images->size[0]; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(rnd->size[0] == images->size[0]); assert(rnd->size[1] == images->size[1]); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); */ // assert(numFilters % 4 == 0); // assert(numImages % 128 == 0); // int outputs = outputsX * outputsX; //target.resize(numFilters*outputs, numImages); if (strideX == 1 && subsX >= 6) { int imgsPerThread = 8; int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 2 : 1; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread); if (filtersPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 1, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 1, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 1, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 1, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 2) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 2, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 2, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 2, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 2, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 3) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 3, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 3, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, 
numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 3, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 3, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 4) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 4, true>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 4, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 4, false>, hipFuncCachePreferShared); hipLaunchKernelGGL(( kLocalProbPool2<Pooler, 8, 8, 4, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } } else { bool checkCaseBounds = numImages % 128 != 0; int filtersPerThread = numFilters % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX); if (filtersPerThread == 1) { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 1, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalProbPool<Pooler, 4, 32, 4, 1, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 1, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalProbPool<Pooler, 4, 32, 4, 1, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } else { if (checkCaseBounds) { hipFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 2, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalProbPool<Pooler, 4, 32, 4, 2, true>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { hipFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 2, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLocalProbPool<Pooler, 4, 32, 4, 2, false>), dim3(blocks), dim3(threads), 0, 0, images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } } getLastCudaError("convLocalPool: kernel execution failed"); } void convResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[0]; int imgPixels = outGrads->size[1] / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads->size[1] == numFilters * imgPixels); //assert(denoms.isSameDims(outGrads)); //assert(acts.isSameDims(denoms)); //assert(!denoms.isTrans()); //assert(!outGrads.isTrans()); //assert(!acts.isTrans()); //assert(!target.isTrans()); //assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); //target.resize(outGrads); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine 
only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads->size[0]*outGrads->size[1],(threads.x * prelimEltsPerThread)))); hipLaunchKernelGGL(( kRNormUndoPrelims<128, 4>), dim3(blocks), dim3(threads), 0, 0, acts->data_device, denoms->data_device, outGrads->data_device, outGrads->size[0]*outGrads->size[1], -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) int imgsPerThread = 8; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo2<16, 8, 4, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { bool checkCaseBounds = numImages % 128 != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*2) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, false, true>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, true>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 
2, false, false>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { hipFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kRNormUndo<4, 32, 2, 2, true, false>), dim3(blocks), dim3(threads), 0, 0, outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } getLastCudaError("kRNormUndo: kernel execution failed"); } void convContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } void convResponseNorm(cudamat* images, cudamat* denoms, cudamat* target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } extern "C" { // Convolutions. extern void convUp(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups){ filterActs(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } extern void convDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups){ imgActs(images, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } extern void convOutp(cudamat* images, cudamat* hidSums, cudamat* targets, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum){ weightActs(images, hidSums, targets, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1); } // Local Connections. extern void localUp(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups){ filterActs(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } extern void localDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups){ imgActs(images, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } extern void localOutp(cudamat* images, cudamat* hidSums, cudamat* targets, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum){ weightActs(images, hidSums, targets, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1); } // Response Normalization. 
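// Editor's sketch (not in the original source): assuming the forward pass follows the
// usual cuda-convnet form implemented by the kCNorm kernels in cudamat_conv_kernels.cuh,
// each unit x_i is normalized over a neighborhood N(i) of width sizeX as
//
//     d_i = k + addScale * sum_{j in N(i)} x_j^2     (k a fixed offset),
//     y_i = x_i * d_i^(-powScale)
//
// Differentiating y_j = x_j * d_j^(-powScale) with respect to x_i gives, with g = outGrads,
//
//     dL/dx_i = g_i * d_i^(-powScale)
//               - 2*addScale*powScale * x_i * sum_{j : i in N(j)} g_j * y_j / d_j
//
// which is why convResponseNormUndo above first runs kRNormUndoPrelims with the factor
// -2*addScale*powScale: it rewrites acts as -2*addScale*powScale * g * y / d, so the main
// kRNormUndo kernel only has to sum that quantity over the neighborhood, multiply by the
// input, and add the direct term g_i * d_i^(-powScale).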
extern void ResponseNorm(cudamat* images, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convResponseNorm(images, denoms, targets, numFilters, sizeX, addScale, powScale); } extern void ResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convResponseNormUndo(outGrads, denoms, inputs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1); } // Contrast Normalization. extern void ContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convContrastNorm(images, meanDiffs, denoms, targets, numFilters, sizeX, addScale, powScale); } extern void ContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convContrastNormUndo(outGrads, denoms, meanDiffs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1); } // Pooling. extern void MaxPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){ MaxPooler mpooler; convLocalPool<MaxPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, mpooler); } extern void ProbMaxPool(cudamat* images, cudamat* rnd, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){ ProbMaxPooler mpooler; convLocalProbPool<ProbMaxPooler>(images, rnd, targets, numFilters, subsX, startX, strideX, outputsX, mpooler); } extern void MaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* targets, int subsX, int startX, int strideX, int outputsX){ convLocalMaxUndo(images, maxGrads, maxActs, targets, subsX, startX, strideX, outputsX, 0, 1); } }
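/*
 * Editor's sketch, not part of cudamat: a minimal, hypothetical host-side driver for the
 * MaxPool entry point above. It assumes only what this file itself relies on -- that
 * cudamat carries an int size[2] (size[0] = numImages, size[1] = numFilters * pixels, as
 * read throughout this file) and a float* data_device pointing at device storage. A real
 * caller would normally allocate and fill these matrices through cudamat's own helpers
 * rather than raw hipMalloc.
 *
 *   static cudamat make_device_mat(int numImages, int numRows) {
 *       cudamat m = {};
 *       m.size[0] = numImages;                     // leading size: images
 *       m.size[1] = numRows;                       // numFilters * pixels per image
 *       hipMalloc((void**)&m.data_device, sizeof(float) * (size_t)numImages * numRows);
 *       return m;
 *   }
 *
 *   void example_max_pool(int numImages, int numFilters, int imgSize) {
 *       int subsX = 2, startX = 0, strideX = 2;    // non-overlapping 2x2 pooling
 *       int outputsX = imgSize / strideX;          // pooled map is outputsX x outputsX
 *       cudamat images  = make_device_mat(numImages, numFilters * imgSize * imgSize);
 *       cudamat targets = make_device_mat(numImages, numFilters * outputsX * outputsX);
 *       MaxPool(&images, &targets, numFilters, subsX, startX, strideX, outputsX);
 *       hipFree(images.data_device);
 *       hipFree(targets.data_device);
 *   }
 */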
bfbaa92bf45d52da5f245bfaeb12a61921195c47.cu
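// Editor's note (not in the original source): this is the CUDA original that the HIP
// translation above was generated from. The main differences hipify introduces in this
// pair are mechanical -- runtime identifiers gain the hip prefix
// (cudaFuncSetCacheConfig / cudaFuncCachePreferShared become
// hipFuncSetCacheConfig / hipFuncCachePreferShared) and triple-chevron launches become
// hipLaunchKernelGGL calls, e.g.
//
//     filterActs_YxX_color<4, 32, 4, 8, 1, false, true><<<blocks, threads>>>(args...);
// becomes
//     hipLaunchKernelGGL((filterActs_YxX_color<4, 32, 4, 8, 1, false, true>),
//                        dim3(blocks), dim3(threads), 0 /*sharedMemBytes*/, 0 /*stream*/,
//                        args...);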
#include <stdio.h> #include <stdlib.h> #include <cublas.h> #include <math.h> #include <assert.h> #include "cudamat_conv_kernels.cuh" /* * images: (numImgColors, imgPixels, numImages) * filters: (numFilterColors, filterPixels, numFilters) * targets: (numFilters, numModules, numImages) */ void filterActs(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numFilters = filters->size[0]; int numModules = numModulesX * numModulesX; int numImages = images->size[0]; int imgPixels = images->size[1]/numImgColors; int imgSize = int(sqrt(imgPixels)); int filterModuleMult = conv ? 1 : numModules; assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(imgSize * imgSize == imgPixels); assert(images->size[1] == imgPixels * numImgColors); int numFiltersPerGroup = numFilters / numGroups; int imgStride = images->size[0]; //images.getStride(); // images does not need to be a contiguous matrix int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = int(sqrt(filterPixels)); assert(filterSize * filterSize == filterPixels); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); /* assert(!images.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans()); assert(filters.isContiguous()); assert(targets.isContiguous());*/ dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 8)) : dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 4)); dim3 threads(32, 4); bool checkImgBounds = numImages % 128 != 0; //if (scaleTargets == 0) { // targets.resize(numFilters * numModules, numImages); //} else { assert(targets->size[1] == numFilters * numModules); assert(targets->size[0] == numImages); //} if (numImgColors <= 3) { assert(numGroups == 1); // It has to be based on above definitions, but just to be sure. if (scaleTargets == 0) { // don't scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { // WTF is this shit? Why does it set everything to zero? // There has got to be an explanation. 
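// Editor's note: presumably the question above refers to the scaleTargets == 0 path.
// Nothing is being zeroed here -- the trailing <..., scale, checkImgBounds> template
// booleans just select a compile-time variant of the kernel: with scale == false the
// kernel ignores the old contents of targets (the "don't scale" branches) instead of
// blending them in with scaleTargets, and with checkImgBounds == true it guards the tail
// case where numImages is not a multiple of the per-block image count (128 here).
// cudaFuncSetCacheConfig only requests a shared-memory-preferring cache split for that
// exact instantiation before it is launched; it does not touch any data.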
cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, 
imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else { // do scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, 
true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } else { if (scaleTargets == 0) { // don't scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, 
scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else { // do scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(images->data_device, filters->data_device, targets->data_device, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } } getLastCudaError("filterActs: kernel execution failed"); } /* * hidActs: (numFilters, numModules, numImages) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * targets: (numImageColors, imgPixels, numImages) */ void imgActs(cudamat* hidActs, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, float scaleOutput, bool conv) { int numFilterColors = numImgColors / numGroups; int numImages = hidActs->size[0]; int numFilters = filters->size[0]; //int numFiltersPerGroup = numFilters / numGroups; int numModules = hidActs->size[1] / numFilters; int filterModuleMult = conv ? 
1 : numModules; int filterPixels = filters->size[1] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSize * imgSize; int numModulesX = sqrt(numModules); assert(numImgColors % numGroups == 0); assert(numFilters % (16*numGroups) == 0); assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 4 == 0); assert(filterPixels == filterSize * filterSize); assert(hidActs->size[1] == numModules * numFilters); assert(filters->size[1] == filterModuleMult * numFilterColors * filterPixels); assert(numModules == numModulesX * numModulesX); /* assert(hidActs.isContiguous()); assert(filters.isContiguous()); assert(!hidActs.isTrans()); assert(!filters.isTrans()); assert(!targets.isTrans());*/ // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); //assert(targets.isContiguous()); // no stride support here! dim3 blocks; dim3 threads(16,16); int colorsPerThread; bool checkCaseBounds; if (numFilterColors % 8 == 0) { threads = dim3(32, 4); colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2; int imgsPerThread = 4; assert(numFilterColors % (threads.y * colorsPerThread) == 0); checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0; blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels); } else if (numFilterColors > 3) { colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2; blocks = dim3(DIVUP(numImages,16*8) * (numImgColors / colorsPerThread), DIVUP(imgSize,4) * DIVUP(imgSize,4)); checkCaseBounds = numImages % (16*8) != 0; } else { blocks = dim3(DIVUP(numImages,16*8), DIVUP(imgSize,4) * DIVUP(imgSize,4)); checkCaseBounds = numImages % (16*8) != 0; } //if (scaleTargets == 0) { // do not scale or use targets matrix // targets.resize(numImgColors*imgPixels, numImages); //} else { assert(targets->size[1] == numImgColors * imgPixels); assert(targets->size[0] == numImages); //} if (conv) { // convolutional units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } 
else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors 
== 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, 
numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } else { // local, unshared units if (scaleTargets == 0) { // do not scale or use targets matrix if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, 
filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared); 
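                        // Launch below: numFilterColors == 3, no scaling into targets, image-count bounds
                        // checking enabled, local (non-convolutional) filters.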
img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors % 8 == 0) { if (checkCaseBounds) { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (numFilterColors % 16 == 0) { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared); conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else if (numFilterColors > 3) { if (checkCaseBounds) { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } 
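                    // In these img_acts_* instantiations the trailing template flags appear to follow the
                    // pattern <..., scale, checkCaseBounds, conv>, i.e. whether existing target values are
                    // blended in (scaleTargets != 0), whether the image count needs bounds checking, and
                    // whether filters are shared convolutionally or are per-module local weights, as selected
                    // by the surrounding branches.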
else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } else { if (colorsPerThread == 4) { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput); } } } else { if (checkCaseBounds) { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } else { if (numFilterColors == 1) { cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 2) { cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } else if (numFilterColors == 3) { cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared); img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs->data_device, filters->data_device, targets->data_device, numModulesX, numImages, numFilters, filterSize, imgSize, paddingStart, moduleStride, scaleTargets, scaleOutput); } } } } } getLastCudaError("imgActs: kernel execution failed"); } void weightActs(cudamat* images, cudamat* hidActs, cudamat* targets, int 
                numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum, float scaleTargets, float scaleOutput) {
    int numFilterColors = numImgColors / numGroups;
    int imgStride = images->size[0];
    int numImages = images->size[0];
    int imgPixels = images->size[1] / numImgColors;
    int imgSize = int(sqrt(imgPixels));
    int numModules = numModulesX * numModulesX;
    int numFilters = hidActs->size[1] / numModules;
    int numFiltersPerGroup = numFilters / numGroups;

    assert(numImgColors % numGroups == 0);
    assert(numFilters % (16*numGroups) == 0);
    assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 4 == 0)));
    assert(numGroups == 1 || numFilterColors % 4 == 0);
    assert(imgSize * imgSize == imgPixels);
    assert(images->size[1] == imgPixels * numImgColors);

    int filterPixels = filterSize * filterSize;
    partialSum = partialSum == 0 ? numModules : partialSum;
    assert(numModules % partialSum == 0);
    assert(hidActs->size[0] == numImages);

    // These routines don't handle the case when only part of the image is visited in the convolution
    assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize);
    assert(moduleStride <= filterSize);
    assert(numModules * numFilters == hidActs->size[1]);

/*  assert(!images.isTrans());
    assert(!hidActs.isTrans());
    assert(hidActs.isContiguous());
    assert(!targets.isTrans());
    assert(targets.isContiguous());*/

    int preloadCases = 32;

    dim3 blocks, threads;
    int bx, by;
    int pixelsPerThread, filtersPerThread, colorsPerThread;
    // Worth playing with these parameters to find best values for your problem.
    // These values work relatively well, but not optimal for all problems.
    if (numFilterColors > 3) {
        filtersPerThread = numFiltersPerGroup % 32 == 0 ? 2 : 1;
        colorsPerThread = numFilterColors % 8 == 0 ? 8 : 4;
        by = numFiltersPerGroup % 64 == 0 ? 4 : 8;
        bx = numFiltersPerGroup % 64 == 0 ? 32 : 16;
        blocks = dim3((numModules/partialSum)*(numFilters/(bx*filtersPerThread)), DIVUP(filterPixels, by) * (numFilterColors / colorsPerThread));
    } else {
        assert(numGroups == 1); // Just for sanity
        pixelsPerThread = numFilters % 32 == 0 ? (numImgColors == 1 ? 8 : 5) : (numImgColors == 1 ? 5 : 2);
        by = numFilters % 32 == 0 ? 4 : 8; // by == 4 seems to work best
        bx = numFilters % 32 == 0 ?
32 : 16; blocks = dim3((numModules/partialSum)*(numFilters/bx), DIVUP(filterPixels, by*pixelsPerThread)); } assert((by * bx) % preloadCases == 0); threads = dim3(bx, by); bool checkCaseBounds = numImages % 32 != 0; //if (scaleTargets == 0) { // targets.resize((numModules/partialSum) * numFilterColors*filterPixels, numFilters); //} else { assert(targets->size[1] == (numModules/partialSum) * numFilterColors*filterPixels); assert(targets->size[0] == numFilters); //} if (numFilterColors > 3) { if (scaleTargets == 0) { // do not scale if (numFiltersPerGroup % 64 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, true>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<4,32,2,8,32,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, false, false>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<4,32,2,8,32,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, true>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<4,32,2,4,32,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, false, false>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<4,32,2,4,32,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } else if (numFiltersPerGroup % 32 == 0) { if (numFilterColors % 8 == 0) { if (checkCaseBounds) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, true>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<8,16,2,8,32,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, false, false>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<8,16,2,8,32,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, true>, cudaFuncCachePreferShared); conv_weight_acts_mc_mf<8,16,2,4,32,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, 
                                numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, false, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,2,4,32, false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                }
            } else {
                if (numFilterColors % 8 == 0) {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,8,32, false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, false, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,8,32, false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                } else {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,4,32, false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, false, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,4,32, false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                }
            }
        } else {
            if (numFiltersPerGroup % 64 == 0) {
                if (numFilterColors % 8 == 0) {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<4,32,2,8,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,8,32, true, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<4,32,2,8,32, true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                } else {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<4,32,2,4,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<4,32,2,4,32, true, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<4,32,2,4,32, true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                }
            } else if (numFiltersPerGroup % 32 == 0) {
                if (numFilterColors % 8 == 0) {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,2,8,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,8,32, true, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,2,8,32, true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                } else {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,2,4,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,2,4,32, true, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,2,4,32, true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                }
            } else {
                if (numFilterColors % 8 == 0) {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,8,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,8,32, true, false>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,8,32, true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    }
                } else {
                    if (checkCaseBounds) {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, true>, cudaFuncCachePreferShared);
                        conv_weight_acts_mc_mf<8,16,1,4,32, true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput);
                    } else {
                        cudaFuncSetCacheConfig(conv_weight_acts_mc_mf<8,16,1,4,32, true, false>, cudaFuncCachePreferShared);
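                        // Launch below: numFiltersPerGroup not a multiple of 32, colorsPerThread == 4,
                        // scaling into targets enabled, no image-count bounds checking.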
conv_weight_acts_mc_mf<8,16,1,4,32,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, numImgColors, numGroups, partialSum, scaleTargets, scaleOutput); } } } } } else { // numColors in 1,2,3 if (scaleTargets == 0) { // do not scale if (numFilterColors == 1) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,8,32,1,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,5,32,1,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,8,32,1,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,5,32,1,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,2,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,2,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,2,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,2,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, 
scaleTargets, scaleOutput); } } } else if (numFilterColors == 3) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,3,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,3,false, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,3,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, false, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,3,false, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } } else { // do scale if (numFilterColors == 1) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,8,32,1,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,5,32,1,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,8,32,1, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,8,32,1,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,5,32,1, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,5,32,1,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 2) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,2,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, 
targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,2,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,2, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,2,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,2, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,2,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } else if (numFilterColors == 3) { if (checkCaseBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,3,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, true>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,3,true, true><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(conv_weight_acts_c<4,32,5,32,3, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<4,32,5,32,3,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(conv_weight_acts_c<8,16,2,32,3, true, false>, cudaFuncCachePreferShared); conv_weight_acts_c<8,16,2,32,3,true, false><<<blocks, threads>>>(images->data_device, hidActs->data_device, targets->data_device, numImages, numFilters, numModulesX, imgSize, filterSize, paddingStart, moduleStride, imgStride, partialSum, scaleTargets, scaleOutput); } } } } } getLastCudaError("weightActs: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * target: (numFilters, outputs, numImages) */ template<class Pooler> void convLocalPool(cudamat* images, cudamat* target, int numFilters, int subsX, int startX, int strideX, int outputsX, Pooler pooler) { int numImages = images->size[0]; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); */ // assert(numFilters % 4 == 0); // 
assert(numImages % 128 == 0); // int outputs = outputsX * outputsX; //target.resize(numFilters*outputs, numImages); if (strideX == 1 && subsX >= 6) { int imgsPerThread = 8; int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 2 : 1; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread); if (filtersPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, true>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 1, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 1, false>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 1, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, true>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 2, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 2, false>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 2, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, true>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 3, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 3, false>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 3, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, true>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 4, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool2<Pooler, 8, 8, 4, false>, cudaFuncCachePreferShared); kLocalPool2<Pooler, 8, 8, 4, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } } else { bool checkCaseBounds = numImages % 128 != 0; int filtersPerThread = numFilters % 8 == 0 ? 
2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX); if (filtersPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, true>, cudaFuncCachePreferL1); kLocalPool<Pooler, 4, 32, 4, 1, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 1, false>, cudaFuncCachePreferL1); kLocalPool<Pooler, 4, 32, 4, 1, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 2, true>, cudaFuncCachePreferL1); kLocalPool<Pooler, 4, 32, 4, 2, true><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalPool<Pooler, 4, 32, 4, 2, false>, cudaFuncCachePreferL1); kLocalPool<Pooler, 4, 32, 4, 2, false><<<blocks, threads>>>(images->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } } getLastCudaError("convLocalPool: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * maxGrads: (numFilters, numOutputs, numImages) * rMaxActs: (numFilters, numOutputs, numImages) * target: (numFilters, imgPixels, numImages) */ void convLocalMaxUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* target, int subsX, int startX, int strideX, int outputsX, float scaleTargets, float scaleOutput) { int outputs = outputsX * outputsX; int numImages = images->size[0]; int numFilters = maxGrads->size[1] / outputs; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(maxGrads->size[1] == numFilters * outputs); assert(maxGrads->size[0] == numImages); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(!maxGrads.isTrans()); assert(!maxActs.isTrans()); assert(images.isContiguous()); assert(maxGrads.isContiguous()); assert(maxActs.isContiguous()); assert(maxGrads.isSameDims(maxActs)); */ assert(numFilters % 16 == 0); // assert(numImages % 128 == 0); assert(strideX <= subsX); //target.resize(images); int checkCaseBounds = numImages % 128 != 0; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, true><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, true><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { kLocalMaxUndo<4, 32, 4, 2, false, false><<<blocks, threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } else { kLocalMaxUndo<4, 32, 4, 2, true, false><<<blocks, 
threads>>>(images->data_device, maxGrads->data_device, maxActs->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, scaleTargets, scaleOutput); } } getLastCudaError("convLocalMaxUndo: kernel execution failed"); } /* * imgs: (numFilters, imgPixels, numImages) * rnd: (numFilters, imgPixels, numImages) * target: (numFilters, outputs, numImages) */ template<class Pooler> void convLocalProbPool(cudamat* images, cudamat* rnd, cudamat* target, int numFilters, int subsX, int startX, int strideX, int outputsX, Pooler pooler) { int numImages = images->size[0]; int imgPixels = images->size[1] / numFilters; assert(images->size[1] == numFilters * imgPixels); int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(rnd->size[0] == images->size[0]); assert(rnd->size[1] == images->size[1]); /* assert(!images.isTrans()); assert(!target.isTrans()); assert(images.isContiguous()); */ // assert(numFilters % 4 == 0); // assert(numImages % 128 == 0); // int outputs = outputsX * outputsX; //target.resize(numFilters*outputs, numImages); if (strideX == 1 && subsX >= 6) { int imgsPerThread = 8; int filtersPerThread = numFilters % 4 == 0 ? 4 : numFilters % 3 == 0 ? 3 : numFilters % 2 == 0 ? 2 : 1; int bx = 8; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); assert(numFilters % filtersPerThread == 0); dim3 threads(bx, 16); dim3 blocks(DIVUP(outputsX, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(outputsX, 4) * numFilters / filtersPerThread); if (filtersPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 1, true>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 1, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 1, false>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 1, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 2) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 2, true>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 2, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 2, false>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 2, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 3) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 3, true>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 3, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 3, false>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 3, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } else if (filtersPerThread == 4) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 4, 
true>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 4, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool2<Pooler, 8, 8, 4, false>, cudaFuncCachePreferShared); kLocalProbPool2<Pooler, 8, 8, 4, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, outputsX, pooler); } } } else { bool checkCaseBounds = numImages % 128 != 0; int filtersPerThread = numFilters % 8 == 0 ? 2 : 1; dim3 threads(32, 4); dim3 blocks(DIVUP(numImages,32*4) * outputsX, DIVUP(numFilters, 4 * filtersPerThread) * outputsX); if (filtersPerThread == 1) { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 1, true>, cudaFuncCachePreferL1); kLocalProbPool<Pooler, 4, 32, 4, 1, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 1, false>, cudaFuncCachePreferL1); kLocalProbPool<Pooler, 4, 32, 4, 1, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } else { if (checkCaseBounds) { cudaFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 2, true>, cudaFuncCachePreferL1); kLocalProbPool<Pooler, 4, 32, 4, 2, true><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } else { cudaFuncSetCacheConfig(kLocalProbPool<Pooler, 4, 32, 4, 2, false>, cudaFuncCachePreferL1); kLocalProbPool<Pooler, 4, 32, 4, 2, false><<<blocks, threads>>>(images->data_device, rnd->data_device, target->data_device, imgSize, numFilters, numImages, subsX, startX, strideX, outputsX, pooler); } } } getLastCudaError("convLocalPool: kernel execution failed"); } void convResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* target, int numFilters, int sizeX, float addScale, float powScale, float scaleTargets, float scaleOutput) { int numImages = outGrads->size[0]; int imgPixels = outGrads->size[1] / numFilters; int imgSize = int(sqrt(imgPixels)); assert(imgSize * imgSize == imgPixels); assert(outGrads->size[1] == numFilters * imgPixels); //assert(denoms.isSameDims(outGrads)); //assert(acts.isSameDims(denoms)); //assert(!denoms.isTrans()); //assert(!outGrads.isTrans()); //assert(!acts.isTrans()); //assert(!target.isTrans()); //assert(outGrads.isContiguous()); assert(numFilters % 16 == 0); //target.resize(outGrads); // First do acts := -2 x scale x acts x outGrads / denoms // so that the main routine only has to do an addition in its inner loop. int prelimEltsPerThread = 4; dim3 threads(128); dim3 blocks(MIN(512, DIVUP(outGrads->size[0]*outGrads->size[1],(threads.x * prelimEltsPerThread)))); kRNormUndoPrelims<128, 4><<<blocks, threads>>>(acts->data_device, denoms->data_device, outGrads->data_device, outGrads->size[0]*outGrads->size[1], -2*addScale*powScale); // Now the main routine if (sizeX >= 6 && numFilters % 4 == 0) { // This one is faster for large regions (my tests show regions >= 6...) 
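        // A rough reading of the launch geometry below: 16x16 thread blocks, each covering a 4x4 patch
        // of the image, up to bx*imgsPerThread = 128 images, and filtersPerThread = 4 filters (see the
        // blocks/threads computation that follows).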
int imgsPerThread = 8; int filtersPerThread = 4; int bx = 16; bool checkCaseBounds = numImages % (bx*imgsPerThread) != 0; assert((imgsPerThread * bx) % 32 == 0); threads = dim3(bx, 16); blocks = dim3(DIVUP(imgSize, 4) * DIVUP(numImages, bx*imgsPerThread), DIVUP(imgSize, 4) * numFilters / filtersPerThread); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, true><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, true>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, true><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, true, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, true, false><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo2<16, 8, 4, false, false>, cudaFuncCachePreferL1); kRNormUndo2<16, 8, 4, false, false><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } else { bool checkCaseBounds = numImages % 128 != 0; threads = dim3(32, 4); blocks = dim3(DIVUP(numImages,32*2) * imgSize, (numFilters / (4 * 2)) * imgSize); if (checkCaseBounds) { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, true><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, true>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, true><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } else { if (scaleTargets == 0 && scaleOutput == 1) { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, false, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, false, false><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } else { cudaFuncSetCacheConfig(kRNormUndo<4, 32, 2, 2, true, false>, cudaFuncCachePreferL1); kRNormUndo<4, 32, 2, 2, true, false><<<blocks, threads>>>(outGrads->data_device, denoms->data_device, inputs->data_device, acts->data_device, target->data_device, imgSize, numFilters, numImages, sizeX, powScale, scaleTargets, scaleOutput); } } } getLastCudaError("kRNormUndo: kernel execution failed"); } void convContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* target, int numFilters, int sizeX, float 
addScale, float powScale, float scaleTargets, float scaleOutput) { convResponseNormUndo(outGrads, denoms, meanDiffs, acts, target, numFilters, sizeX, addScale, powScale, scaleTargets, scaleOutput); } void convResponseNorm(cudamat* images, cudamat* denoms, cudamat* target, int numFilters, int sizeX, float addScale, float powScale) { convContrastNorm(images, images, denoms, target, numFilters, sizeX, addScale, powScale); } extern "C" { // Convolutions. extern void convUp(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups){ filterActs(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } extern void convDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups){ imgActs(images, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true); } extern void convOutp(cudamat* images, cudamat* hidSums, cudamat* targets, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum){ weightActs(images, hidSums, targets, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, partialSum, 0, 1); } // Local Connections. extern void localUp(cudamat* images, cudamat* filters, cudamat* targets, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups){ filterActs(images, filters, targets, numModulesX, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } extern void localDown(cudamat* images, cudamat* filters, cudamat* targets, int imgSize, int paddingStart, int moduleStride, int numImgColors, int numGroups){ imgActs(images, filters, targets, imgSize, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false); } extern void localOutp(cudamat* images, cudamat* hidSums, cudamat* targets, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int partialSum){ weightActs(images, hidSums, targets, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, 1, 0, 1); } // Response Normalization. extern void ResponseNorm(cudamat* images, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convResponseNorm(images, denoms, targets, numFilters, sizeX, addScale, powScale); } extern void ResponseNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* inputs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convResponseNormUndo(outGrads, denoms, inputs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1); } // Contrast Normalization. extern void ContrastNorm(cudamat* images, cudamat* meanDiffs, cudamat* denoms, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convContrastNorm(images, meanDiffs, denoms, targets, numFilters, sizeX, addScale, powScale); } extern void ContrastNormUndo(cudamat* outGrads, cudamat* denoms, cudamat* meanDiffs, cudamat* acts, cudamat* targets, int numFilters, int sizeX, float addScale, float powScale){ convContrastNormUndo(outGrads, denoms, meanDiffs, acts, targets, numFilters, sizeX, addScale, powScale, 0, 1); } // Pooling. 
extern void MaxPool(cudamat* images, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){ MaxPooler mpooler; convLocalPool<MaxPooler>(images, targets, numFilters, subsX, startX, strideX, outputsX, mpooler); } extern void ProbMaxPool(cudamat* images, cudamat* rnd, cudamat* targets, int numFilters, int subsX, int startX, int strideX, int outputsX){ ProbMaxPooler mpooler; convLocalProbPool<ProbMaxPooler>(images, rnd, targets, numFilters, subsX, startX, strideX, outputsX, mpooler); } extern void MaxPoolUndo(cudamat* images, cudamat* maxGrads, cudamat* maxActs, cudamat* targets, int subsX, int startX, int strideX, int outputsX){ convLocalMaxUndo(images, maxGrads, maxActs, targets, subsX, startX, strideX, outputsX, 0, 1); } }
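The pooling and normalization wrappers above end each launch with getLastCudaError, which is defined elsewhere in this codebase. As a point of reference only, a minimal sketch of such a check built on the plain CUDA runtime API (the name and exact behavior of the real helper may differ) could look like:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical stand-in for getLastCudaError: report and abort if the most
// recent kernel launch (or an earlier asynchronous error) failed.
inline void checkLastCudaError(const char* msg, const char* file, int line) {
    cudaError_t err = cudaGetLastError();   // fetches and clears the sticky launch error
    if (err != cudaSuccess) {
        fprintf(stderr, "%s:%d: %s: %s\n", file, line, msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
#define getLastCudaErrorSketch(msg) checkLastCudaError(msg, __FILE__, __LINE__)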
d64bc8244f54bbe44735f40d6d0dc6ebe10748e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <iostream> #include <chrono> __global__ void add_OneBlockOneThread(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } __global__ void add_OneBlockManyThreads(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } __global__ void add_ManyBlocksManyThreads(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // 1048576 elements float *x, *y; std::chrono::steady_clock::time_point start, end; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N * sizeof(float)); hipMallocManaged(&y, N * sizeof(float)); int numberOfThreads; std::cout << "Input number of threads: "; std::cin >> numberOfThreads; std::cout << std::endl; // ========== One block, one thread ========== // for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( add_OneBlockOneThread) , dim3(1), dim3(1) , 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // ========== One block, many threads ========== // start = std::chrono::steady_clock::now(); hipLaunchKernelGGL(( add_OneBlockManyThreads) , dim3(1), dim3(numberOfThreads) , 0, 0, N, x, y); hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; // ========== Many Blocks, many threads ========== // for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } start = std::chrono::steady_clock::now(); int blockSize = numberOfThreads; int numBlocks = (N + blockSize - 1) / blockSize; hipLaunchKernelGGL(( add_ManyBlocksManyThreads) , dim3(numBlocks), dim3(blockSize) , 0, 0, N, x, y); hipDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
d64bc8244f54bbe44735f40d6d0dc6ebe10748e6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <math.h> #include <iostream> #include <chrono> __global__ void add_OneBlockOneThread(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } __global__ void add_OneBlockManyThreads(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } __global__ void add_ManyBlocksManyThreads(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; // 1048576 elements float *x, *y; std::chrono::steady_clock::time_point start, end; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); int numberOfThreads; std::cout << "Input number of threads: "; std::cin >> numberOfThreads; std::cout << std::endl; // ========== One block, one thread ========== // for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } start = std::chrono::steady_clock::now(); add_OneBlockOneThread <<< 1, 1 >>> (N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // ========== One block, many threads ========== // start = std::chrono::steady_clock::now(); add_OneBlockManyThreads <<< 1, numberOfThreads >>> (N, x, y); cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; // ========== Many Blocks, many threads ========== // for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } start = std::chrono::steady_clock::now(); int blockSize = numberOfThreads; int numBlocks = (N + blockSize - 1) / blockSize; add_ManyBlocksManyThreads <<< numBlocks, blockSize >>> (N, x, y); cudaDeviceSynchronize(); end = std::chrono::steady_clock::now(); std::cout << "Elapsed time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
4829854a44e8e8715b18d52ace59405cfe9827b2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ShuffleRGB.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; hipMalloc(&input, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ShuffleRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ShuffleRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ShuffleRGB), dim3(gridBlock),dim3(threadBlock), 0, 0, input,output,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4829854a44e8e8715b18d52ace59405cfe9827b2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ShuffleRGB.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *input = NULL; cudaMalloc(&input, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int size = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ShuffleRGB<<<gridBlock,threadBlock>>>(input,output,size); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ShuffleRGB<<<gridBlock,threadBlock>>>(input,output,size); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ShuffleRGB<<<gridBlock,threadBlock>>>(input,output,size); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
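The timed loop above launches ShuffleRGB 1000 times and reads steady_clock without synchronizing before taking the end timestamp, so the measurement is dominated by launch overhead rather than kernel execution time. A hedged alternative using CUDA events, reusing the variable names from the file above, might look like:

// Sketch only: event-based timing of the same 1000-launch loop.
cudaEvent_t evStart, evStop;
cudaEventCreate(&evStart);
cudaEventCreate(&evStop);
cudaEventRecord(evStart);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    ShuffleRGB<<<gridBlock, threadBlock>>>(input, output, size);
}
cudaEventRecord(evStop);
cudaEventSynchronize(evStop);              // block until all queued launches have finished
float elapsed_ms = 0.0f;
cudaEventElapsedTime(&elapsed_ms, evStart, evStop);
cudaEventDestroy(evStart);
cudaEventDestroy(evStop);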
ff824aa4284e4529ceb2e604e0b30cf7902c186d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zpotf2_kernels_var.cu, normal z -> c, Mon Jun 25 18:24:17 2018 */ #define PRECISION_c #include "magma_internal.h" #include "batched_kernel_param.h" #include "magma_templates.h" //#define VBATCH_DISABLE_THREAD_RETURN #ifdef VBATCH_DISABLE_THREAD_RETURN #define ENABLE_COND1 #define ENABLE_COND2 #define ENABLE_COND4 #define ENABLE_COND5 #define ENABLE_COND6 #endif #define MAX_NTCOL 1 #include "cpotf2_devicesfunc.cuh" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cpotf2_smlpout_kernel_vbatched_v2(int maxm, magma_int_t *m, magmaFloatComplex **dA_array, magma_int_t *lda, int localstep, int gbstep, magma_int_t *info_array) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int mylda = (int)lda[batchid]; const int myoff = ((maxm - my_m)/POTF2_NB)*POTF2_NB; const int mylocstep = localstep - myoff; const int myrows = mylocstep >= 0 ? my_m-mylocstep : 0; const int myib = min(POTF2_NB, myrows); #ifndef VBATCH_DISABLE_THREAD_RETURN const int tx = threadIdx.x; if(tx >= myrows) return; #else if(myrows <= 0) return; #endif if(myib == POTF2_NB) cpotf2_smlpout_fixwidth_device( myrows, dA_array[batchid]+mylocstep, dA_array[batchid]+mylocstep+mylocstep*mylda, mylda, mylocstep, gbstep, &(info_array[batchid])); else cpotf2_smlpout_anywidth_device( myrows, myib, dA_array[batchid]+mylocstep, dA_array[batchid]+mylocstep+mylocstep*mylda, mylda, mylocstep, gbstep, &(info_array[batchid])); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cpotf2_smlpout_kernel_vbatched(magma_int_t *m, magmaFloatComplex **dA_array, magma_int_t *lda, int localstep, int gbstep, magma_int_t *info_array) { const int batchid = blockIdx.z; const int myrows = (int)m[batchid] - localstep; const int myib = min(POTF2_NB, myrows); const int mylda = lda[batchid]; #ifndef VBATCH_DISABLE_THREAD_RETURN const int tx = threadIdx.x; if(tx >= myrows) return; #else if(myrows <= 0) return; #endif if(myib == POTF2_NB) cpotf2_smlpout_fixwidth_device( myrows, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*mylda, mylda, localstep, gbstep, &(info_array[batchid])); else cpotf2_smlpout_anywidth_device( myrows, myib, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*mylda, mylda, localstep, gbstep, &(info_array[batchid])); } ///////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_cpotrf_lpout_vbatched( magma_uplo_t uplo, magma_int_t *n, magma_int_t max_n, magmaFloatComplex **dA_array, magma_int_t *lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t arginfo = 0; // Quick return if possible if (max_n <= 0) { arginfo = -33; // any value for now return arginfo; } dim3 dimGrid(1, 1, batchCount); for(magma_int_t j = 0; j < max_n; j+= POTF2_NB) { magma_int_t rows_max = max_n-j; magma_int_t nbth = rows_max; dim3 threads(nbth, 1); magma_int_t shared_mem_size = sizeof(magmaFloatComplex)*(nbth+POTF2_NB)*POTF2_NB; if(shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } //cpotf2_smlpout_kernel_vbatched<<<dimGrid, threads, 
shared_mem_size, queue >>>(n, dA_array, lda, j, gbstep, info_array); hipLaunchKernelGGL(cpotf2_smlpout_kernel_vbatched_v2, dim3(dimGrid), dim3(threads), shared_mem_size, queue->cuda_stream(), max_n, n, dA_array, lda, j, gbstep, info_array); } return arginfo; }
ff824aa4284e4529ceb2e604e0b30cf7902c186d.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zpotf2_kernels_var.cu, normal z -> c, Mon Jun 25 18:24:17 2018 */ #define PRECISION_c #include "magma_internal.h" #include "batched_kernel_param.h" #include "magma_templates.h" //#define VBATCH_DISABLE_THREAD_RETURN #ifdef VBATCH_DISABLE_THREAD_RETURN #define ENABLE_COND1 #define ENABLE_COND2 #define ENABLE_COND4 #define ENABLE_COND5 #define ENABLE_COND6 #endif #define MAX_NTCOL 1 #include "cpotf2_devicesfunc.cuh" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cpotf2_smlpout_kernel_vbatched_v2(int maxm, magma_int_t *m, magmaFloatComplex **dA_array, magma_int_t *lda, int localstep, int gbstep, magma_int_t *info_array) { const int batchid = blockIdx.z; const int my_m = (int)m[batchid]; const int mylda = (int)lda[batchid]; const int myoff = ((maxm - my_m)/POTF2_NB)*POTF2_NB; const int mylocstep = localstep - myoff; const int myrows = mylocstep >= 0 ? my_m-mylocstep : 0; const int myib = min(POTF2_NB, myrows); #ifndef VBATCH_DISABLE_THREAD_RETURN const int tx = threadIdx.x; if(tx >= myrows) return; #else if(myrows <= 0) return; #endif if(myib == POTF2_NB) cpotf2_smlpout_fixwidth_device( myrows, dA_array[batchid]+mylocstep, dA_array[batchid]+mylocstep+mylocstep*mylda, mylda, mylocstep, gbstep, &(info_array[batchid])); else cpotf2_smlpout_anywidth_device( myrows, myib, dA_array[batchid]+mylocstep, dA_array[batchid]+mylocstep+mylocstep*mylda, mylda, mylocstep, gbstep, &(info_array[batchid])); } ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void cpotf2_smlpout_kernel_vbatched(magma_int_t *m, magmaFloatComplex **dA_array, magma_int_t *lda, int localstep, int gbstep, magma_int_t *info_array) { const int batchid = blockIdx.z; const int myrows = (int)m[batchid] - localstep; const int myib = min(POTF2_NB, myrows); const int mylda = lda[batchid]; #ifndef VBATCH_DISABLE_THREAD_RETURN const int tx = threadIdx.x; if(tx >= myrows) return; #else if(myrows <= 0) return; #endif if(myib == POTF2_NB) cpotf2_smlpout_fixwidth_device( myrows, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*mylda, mylda, localstep, gbstep, &(info_array[batchid])); else cpotf2_smlpout_anywidth_device( myrows, myib, dA_array[batchid]+localstep, dA_array[batchid]+localstep+localstep*mylda, mylda, localstep, gbstep, &(info_array[batchid])); } ///////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_cpotrf_lpout_vbatched( magma_uplo_t uplo, magma_int_t *n, magma_int_t max_n, magmaFloatComplex **dA_array, magma_int_t *lda, magma_int_t gbstep, magma_int_t *info_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t arginfo = 0; // Quick return if possible if (max_n <= 0) { arginfo = -33; // any value for now return arginfo; } dim3 dimGrid(1, 1, batchCount); for(magma_int_t j = 0; j < max_n; j+= POTF2_NB) { magma_int_t rows_max = max_n-j; magma_int_t nbth = rows_max; dim3 threads(nbth, 1); magma_int_t shared_mem_size = sizeof(magmaFloatComplex)*(nbth+POTF2_NB)*POTF2_NB; if(shared_mem_size > 47000) { arginfo = -33; magma_xerbla( __func__, -(arginfo) ); return arginfo; } //cpotf2_smlpout_kernel_vbatched<<<dimGrid, threads, shared_mem_size, queue >>>(n, dA_array, lda, j, gbstep, info_array); 
cpotf2_smlpout_kernel_vbatched_v2 <<<dimGrid, threads, shared_mem_size, queue->cuda_stream() >>> (max_n, n, dA_array, lda, j, gbstep, info_array); } return arginfo; }
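The launcher above rejects configurations whose dynamic shared-memory footprint exceeds a hard-coded 47000 bytes. As an illustration only (not MAGMA's actual approach), the per-block limit could be queried from the runtime instead:

// Sketch: query the device's per-block shared-memory limit at run time.
int device = 0, smem_per_block = 0;
cudaGetDevice(&device);
cudaDeviceGetAttribute(&smem_per_block, cudaDevAttrMaxSharedMemoryPerBlock, device);
// shared_mem_size could then be checked against smem_per_block rather than 47000.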
d359d45c9d26acc7fda5239ee06b08bd4dae6edb.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hip/hip_runtime.h> #include "core/providers/cuda/math/binary_elementwise_ops_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh" #include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh" namespace onnxruntime { namespace cuda { #define BINARY_ELEMENTWISE_IMPL(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T, T>(), \ count); \ } #define BINARY_ELEMENTWISE_IMPL_T1(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION_T1(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T, T1>(), \ count); \ } #define BINARY_ELEMENTWISE_IMPL_T2(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION_T2(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T1, T2>(), \ count); \ } #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \ template void Impl_##x<T>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, T1) \ template void ImplT1_##x<T, T1>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T1* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(x, T, T1, T2) \ template void ImplT2_##x<T, T1, T2>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T1* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T2* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(x, T) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, double) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ 
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) // create declarations for impl #define BINARY_OP_NAME_EXPR(name, expr) \ BINARY_ELEMENTWISE_IMPL(name) BINARY_OPS() #undef BINARY_OP_NAME_EXPR // create specialized impl // the postfix of means the types supported by the op: // B: uint8_t // W: uint16_t // U: uint32_t // Z: uint64_t // C: int8_t // S: int16_t // I: int32_t // L: int64_t // H: float16 // F: float // D: double // O: bool SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow_7) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Max) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Min) // create declarations for impl for Pow BINARY_ELEMENTWISE_IMPL_T1(Pow) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int32_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int64_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, float) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, double) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, half) // create declarations for impl2 #define BINARY_OP_NAME_EXPR2(name, expr) \ BINARY_ELEMENTWISE_IMPL_T2(name) BINARY_OPS2() #undef BINARY_OP_NAME_EXPR2 #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(name) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint32_t, uint32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint64_t, uint64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int32_t, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int64_t, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, half, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, float, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, double, double) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Greater) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, bool, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, int32_t, int32_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, int64_t, int64_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Less) } // namespace cuda } // namespace onnxruntime
d359d45c9d26acc7fda5239ee06b08bd4dae6edb.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cuda_runtime.h> #include "core/providers/cuda/math/binary_elementwise_ops_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh" #include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh" namespace onnxruntime { namespace cuda { #define BINARY_ELEMENTWISE_IMPL(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T, T>(), \ count); \ } #define BINARY_ELEMENTWISE_IMPL_T1(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION_T1(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T, T1>(), \ count); \ } #define BINARY_ELEMENTWISE_IMPL_T2(name) \ BINARY_ELEMENTWISE_IMPL_DECLARATION_T2(name) { \ BinaryElementWiseImpl(output_rank_or_simple_broadcast, \ lhs_padded_strides, \ lhs_data, \ rhs_padded_strides, \ rhs_data, \ fdm_output_strides, \ fdm_H, \ fdm_C, \ output_data, \ OP_##name<T, T1, T2>(), \ count); \ } #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \ template void Impl_##x<T>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, T1) \ template void ImplT1_##x<T, T1>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T1* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(x, T, T1, T2) \ template void ImplT2_##x<T, T1, T2>(int32_t output_rank, \ const TArray<int64_t>* lhs_padded_strides, const T1* lhs_data, \ const TArray<int64_t>* rhs_padded_strides, const T2* rhs_data, \ const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count); #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(x, T) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, double) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \ 
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double) // create declarations for impl #define BINARY_OP_NAME_EXPR(name, expr) \ BINARY_ELEMENTWISE_IMPL(name) BINARY_OPS() #undef BINARY_OP_NAME_EXPR // create specialized impl // the postfix of means the types supported by the op: // B: uint8_t // W: uint16_t // U: uint32_t // Z: uint64_t // C: int8_t // S: int16_t // I: int32_t // L: int64_t // H: float16 // F: float // D: double // O: bool SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow_7) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Max) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Min) // create declarations for impl for Pow BINARY_ELEMENTWISE_IMPL_T1(Pow) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int32_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int64_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, float) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, double) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, half) // create declarations for impl2 #define BINARY_OP_NAME_EXPR2(name, expr) \ BINARY_ELEMENTWISE_IMPL_T2(name) BINARY_OPS2() #undef BINARY_OP_NAME_EXPR2 #define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(name) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint32_t, uint32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint64_t, uint64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int32_t, int32_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int64_t, int64_t) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, half, half) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, float, float) \ SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, double, double) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Greater) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, bool, bool) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, int32_t, int32_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, int64_t, int64_t) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Less) } // namespace cuda } // namespace onnxruntime
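For readers tracing the macros, SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, float) in the file above expands to an explicit instantiation of this shape (whitespace added for readability; TArray and fast_divmod come from the included headers):

template void Impl_Add<float>(int32_t output_rank,
                              const TArray<int64_t>* lhs_padded_strides, const float* lhs_data,
                              const TArray<int64_t>* rhs_padded_strides, const float* rhs_data,
                              const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H,
                              const fast_divmod& fdm_C, float* output_data, size_t count);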
946e08b87002d5e04708ebd27f1e84fe59560834.hip
// !!! This is a file automatically generated by hipify!!! /** This is a test run to allocate zero copy memory. Zero copy memory is memory mapped in device address space. This could be used to improve PCIe transfer. We don't have to do explicit data transfer between host and device. If device memory is frequently read or written, then it could have a huge impact on performance, since each operation needs to be transferred to the host over the PCIe bus. */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> __global__ void Zero_Copy_Add(float* a, float* b, float* c, int size) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < size) { c[gid] = a[gid] + b[gid]; } } int main() { int deviceId = 0; hipSetDevice(deviceId); // Get Device Properties hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, deviceId); if (!deviceProp.canMapHostMemory) // check if mapping is supported or not. { printf("Device %d does not support mapping CPU host memory.\n", deviceId); return 0; } int size = 1 << 25; int bytes = size * sizeof(float); dim3 block_size(128); dim3 grid(size / block_size.x); float* h_dataA, * h_dataB, * h_dataC; h_dataC = (float*)malloc(bytes); // allocate mapped host memory hipHostMalloc((float**)&h_dataA, bytes, hipHostMallocMapped); hipHostMalloc((float**)&h_dataB, bytes, hipHostMallocMapped); float* d_dataA, *d_dataB, *d_dataC; // Get Device pointer to mapped memory hipHostGetDevicePointer((float**)&d_dataA, h_dataA, 0); hipHostGetDevicePointer((float**)&d_dataB, h_dataB, 0); hipMalloc((float**)&d_dataC, bytes); Zero_Copy_Add << <grid, block_size >> > (d_dataA, d_dataB, d_dataC, size); hipDeviceSynchronize(); hipMemcpy(h_dataC, d_dataC, bytes, hipMemcpyDeviceToHost); hipHostFree(h_dataA); hipHostFree(h_dataB); hipFree(d_dataC); free(h_dataC); hipDeviceReset(); return 0; }
946e08b87002d5e04708ebd27f1e84fe59560834.cu
/** This is a test run to allocate zero copy memory. Zero copy memory is memory mapped in device address space. This could be used to improve PCIe transfer. We don't have to do explicit data transfer between host and device. If device memory is frequently read or written, then it could have a huge impact on performance, since each operation needs to be transferred to the host over the PCIe bus. */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> __global__ void Zero_Copy_Add(float* a, float* b, float* c, int size) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if (gid < size) { c[gid] = a[gid] + b[gid]; } } int main() { int deviceId = 0; cudaSetDevice(deviceId); // Get Device Properties cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, deviceId); if (!deviceProp.canMapHostMemory) // check if mapping is supported or not. { printf("Device %d does not support mapping CPU host memory.\n", deviceId); return 0; } int size = 1 << 25; int bytes = size * sizeof(float); dim3 block_size(128); dim3 grid(size / block_size.x); float* h_dataA, * h_dataB, * h_dataC; h_dataC = (float*)malloc(bytes); // allocate mapped host memory cudaHostAlloc((float**)&h_dataA, bytes, cudaHostAllocMapped); cudaHostAlloc((float**)&h_dataB, bytes, cudaHostAllocMapped); float* d_dataA, *d_dataB, *d_dataC; // Get Device pointer to mapped memory cudaHostGetDevicePointer((float**)&d_dataA, h_dataA, 0); cudaHostGetDevicePointer((float**)&d_dataB, h_dataB, 0); cudaMalloc((float**)&d_dataC, bytes); Zero_Copy_Add << <grid, block_size >> > (d_dataA, d_dataB, d_dataC, size); cudaDeviceSynchronize(); cudaMemcpy(h_dataC, d_dataC, bytes, cudaMemcpyDeviceToHost); cudaFreeHost(h_dataA); cudaFreeHost(h_dataB); cudaFree(d_dataC); free(h_dataC); cudaDeviceReset(); return 0; }
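Depending on driver and platform, mapped (zero-copy) allocations may also require the device to be placed in map-host mode before the first call that creates a context; this is usually unnecessary on platforms with unified virtual addressing, but a hedged sketch of the extra step is:

// Sketch: opt the device into host-memory mapping before any cudaHostAlloc(..., cudaHostAllocMapped).
cudaSetDeviceFlags(cudaDeviceMapHost);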
75c22d8f15f863c0d8ed39f393cc7e48a1324ec8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <hip/hip_runtime.h> #include <time.h> #include <ctime> #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <iomanip> using namespace std; #define ull unsigned long long #define SHARED_MEM_ARR_SIZE 1024 #define RAND_BOUND 1 float* A; float* B; double KernelTime = 0; hipError_t sumReductionWithCudaQ6(float* A, ull size); int Q6Main(); __global__ void sumReductionKernelNoDivergence(float* A, ull size) { __shared__ float partialSumArr[SHARED_MEM_ARR_SIZE]; int i = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; if (i < size) { //each thread loads one el from global memory partialSumArr[tx] = A[i]; for (unsigned int stride = blockDim.x/2; stride >=1; stride = stride >>1) { __syncthreads(); if (tx <stride && (i + stride < size)) partialSumArr[tx] += partialSumArr[tx + stride]; } __syncthreads(); if (tx == 0) A[blockIdx.x] = partialSumArr[0]; } } double CPUSequentialSum(float* a, int size); int main() { return Q6Main(); } int Q6Main() { std::cout << std::fixed; std::cout << std::setprecision(6); ull size; /* initialize random seed: */ srand(time(NULL)); cout << "Enter size " << endl; cin >> size; clock_t cpu_start1 = clock(); A = (float*)malloc(sizeof(float) * size); for (int i = 0; i < size; i++) A[i] = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / RAND_BOUND)); //SEQUENTIAL clock_t cpu_start = clock(); float SeqSum = CPUSequentialSum(A, size); clock_t cpu_end = clock(); double cpu_time = (double)(cpu_end - cpu_start) / (double)CLOCKS_PER_SEC; double cpu_time_with_mmem = (double)(cpu_end - cpu_start1) / (double)CLOCKS_PER_SEC; clock_t gpu_start = clock(); hipError_t cudaStatus = sumReductionWithCudaQ6(A, size); double gpu_full_time = (double)(clock() - cpu_start) / (double)CLOCKS_PER_SEC; cout << "Sequential CPU Sum " << SeqSum << endl; cout << "Parallel GPU Sum = " << A[0] << " " << endl; cout << "Sequential CPU time without mem = " << cpu_time; cout << " GPU Kernel time = " << KernelTime << endl; cout << "Sequential CPU time with mem =" << cpu_time_with_mmem; cout << " GPU Full time = " << gpu_full_time << endl; cout << "---------\n Speedup without memory time = " << cpu_time / KernelTime << " Speedup with memory time = " << cpu_time_with_mmem / gpu_full_time << endl; if (cudaStatus != hipSuccess) { fprintf(stderr, "sumReductionWithCuda failed!"); return 1; } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } free(A); return 0; } hipError_t sumReductionWithCudaQ6(float* A, ull size) { float* dev_A = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for the array cudaStatus = hipMalloc((void**)&dev_A, size * sizeof(float)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_A, A, size * sizeof(float), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } int blockDimX = ceil(float(size) / 1024.0); dim3 dimBlock(1024, 1, 1); dim3 dimGrid(blockDimX, 1, 1); clock_t start = clock(); sumReductionKernelNoDivergence <<< dimGrid, dimBlock >> > (dev_A, size); cudaStatus = hipDeviceSynchronize(); while (blockDimX > 1) { size = blockDimX; dim3 dimGrid(blockDimX, 1, 1); sumReductionKernelNoDivergence << < dimGrid, dimBlock >> > (dev_A, size); cudaStatus = hipDeviceSynchronize(); blockDimX = ceil(float(blockDimX) / 1024.0); } KernelTime = (double)(clock() - start) / CLOCKS_PER_SEC; // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = hipMemcpy(A, dev_A, size * sizeof(float), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_A); return cudaStatus; } double CPUSequentialSum(float* a, int size) { double sum = 0; for (int i = 0; i < size; i++) sum +=a[i]; return sum; }
75c22d8f15f863c0d8ed39f393cc7e48a1324ec8.cu
#include "cuda_runtime.h" #include <cuda.h> #include <time.h> #include <ctime> #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include <iomanip> using namespace std; #define ull unsigned long long #define SHARED_MEM_ARR_SIZE 1024 #define RAND_BOUND 1 float* A; float* B; double KernelTime = 0; cudaError_t sumReductionWithCudaQ6(float* A, ull size); int Q6Main(); __global__ void sumReductionKernelNoDivergence(float* A, ull size) { __shared__ float partialSumArr[SHARED_MEM_ARR_SIZE]; int i = blockDim.x * blockIdx.x + threadIdx.x; int tx = threadIdx.x; if (i < size) { //each thread loads one el from global memory partialSumArr[tx] = A[i]; for (unsigned int stride = blockDim.x/2; stride >=1; stride = stride >>1) { __syncthreads(); if (tx <stride && (i + stride < size)) partialSumArr[tx] += partialSumArr[tx + stride]; } __syncthreads(); if (tx == 0) A[blockIdx.x] = partialSumArr[0]; } } double CPUSequentialSum(float* a, int size); int main() { return Q6Main(); } int Q6Main() { std::cout << std::fixed; std::cout << std::setprecision(6); ull size; /* initialize random seed: */ srand(time(NULL)); cout << "Enter size " << endl; cin >> size; clock_t cpu_start1 = clock(); A = (float*)malloc(sizeof(float) * size); for (int i = 0; i < size; i++) A[i] = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / RAND_BOUND)); //SEQUENTIAL clock_t cpu_start = clock(); float SeqSum = CPUSequentialSum(A, size); clock_t cpu_end = clock(); double cpu_time = (double)(cpu_end - cpu_start) / (double)CLOCKS_PER_SEC; double cpu_time_with_mmem = (double)(cpu_end - cpu_start1) / (double)CLOCKS_PER_SEC; clock_t gpu_start = clock(); cudaError_t cudaStatus = sumReductionWithCudaQ6(A, size); double gpu_full_time = (double)(clock() - cpu_start) / (double)CLOCKS_PER_SEC; cout << "Sequential CPU Sum " << SeqSum << endl; cout << "Parallel GPU Sum = " << A[0] << " " << endl; cout << "Sequential CPU time without mem = " << cpu_time; cout << " GPU Kernel time = " << KernelTime << endl; cout << "Sequential CPU time with mem =" << cpu_time_with_mmem; cout << " GPU Full time = " << gpu_full_time << endl; cout << "---------\n Speedup without memory time = " << cpu_time / KernelTime << " Speedup with memory time = " << cpu_time_with_mmem / gpu_full_time << endl; if (cudaStatus != cudaSuccess) { fprintf(stderr, "sumReductionWithCuda failed!"); return 1; } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } free(A); return 0; } cudaError_t sumReductionWithCudaQ6(float* A, ull size) { float* dev_A = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for the array cudaStatus = cudaMalloc((void**)&dev_A, size * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpy(dev_A, A, size * sizeof(float), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } int blockDimX = ceil(float(size) / 1024.0); dim3 dimBlock(1024, 1, 1); dim3 dimGrid(blockDimX, 1, 1); clock_t start = clock(); sumReductionKernelNoDivergence <<< dimGrid, dimBlock >> > (dev_A, size); cudaStatus = cudaDeviceSynchronize(); while (blockDimX > 1) { size = blockDimX; dim3 dimGrid(blockDimX, 1, 1); sumReductionKernelNoDivergence << < dimGrid, dimBlock >> > (dev_A, size); cudaStatus = cudaDeviceSynchronize(); blockDimX = ceil(float(blockDimX) / 1024.0); } KernelTime = (double)(clock() - start) / CLOCKS_PER_SEC; // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } cudaStatus = cudaMemcpy(A, dev_A, size * sizeof(float), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_A); return cudaStatus; } double CPUSequentialSum(float* a, int size) { double sum = 0; for (int i = 0; i < size; i++) sum +=a[i]; return sum; }
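The host code above derives the grid size with ceil(float(size) / 1024.0). An equivalent integer round-up (a sketch, not the original author's code) avoids the float round trip and can be reused for both the first launch and the follow-up reduction passes:

// Sketch: integer ceiling division for sizing 1024-thread blocks,
// equivalent to ceil(float(size) / 1024.0) in the host code above.
static inline int divUpBy1024(unsigned long long elems) {
    return (int)((elems + 1023ULL) / 1024ULL);
}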
2d05bda0a8086dd655aa82f29fe7ee8b8528b376.hip
// !!! This is a file automatically generated by hipify!!! // -*- C++ -*- // -*- coding: utf-8 -*- // // michael a.g. avzis <[email protected]> // parasim // (c) 1998-2019 all rights reserved // // configuration #include <portinfo> // STL #include <exception> #include <string> // cuda #include <hip/hip_runtime.h> // pyre #include <pyre/journal.h> // local declarations #include "kernels.h" // helpers __global__ static void _r2c(const float * gamma, std::size_t corDim, std::size_t zmdDim, hipComplex * scratch); // compute the amplitude of the signal tiles, assuming pixels are of type std::complex<float> auto ampcor::cuda::kernels:: r2c(const float * gamma, std::size_t pairs, std::size_t corDim, std::size_t zmdDim) -> hipComplex * { // constants const auto Mb = 1.0 / 1024 / 1024; // grab a spot hipComplex * scratch = nullptr; // compute the number of cells in the zoomed correlation matrix auto zmdCells = zmdDim * zmdDim; // compute the amount of memory we need auto footprint = pairs * zmdCells * sizeof(hipComplex); // allocate memory for the complex zoomed version auto status = hipMallocManaged(&scratch, footprint); // if something went wrong if (status != hipSuccess) { // make a channel pyre::journal::error_t error("ampcor.cuda"); // complain error << pyre::journal::at(__HERE__) << "while allocating " << footprint * Mb << " Mb of device memory for the zoomed correlation matrix" << hipGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail throw std::bad_alloc(); } // initialize the memory status = hipMemset(scratch, 0, footprint); // if something went wrong if (status != hipSuccess) { // get the error description std::string description = hipGetErrorName(status); // make a channel pyre::journal::error_t error("ampcor.cuda"); // complain error << pyre::journal::at(__HERE__) << "while initializing " << footprint * Mb << " Mb of device memory for the zoomed correlation matrix: " << description << " (" << status << ")" << pyre::journal::endl; // and bail throw std::runtime_error(description); } // we will launch enough blocks auto B = pairs; // with enough threads auto T = 32 * (corDim / 32 + (corDim % 32) ? 
1 : 0); // make a channel pyre::journal::debug_t channel("ampcor.cuda"); // show me channel << pyre::journal::at(__HERE__) << "launching " << B << " blocks of " << T << " threads each to process " << corDim << " columns of the correlation hyper-matrix" << pyre::journal::endl; // launch hipLaunchKernelGGL(( _r2c) , dim3(B),dim3(T), 0, 0, gamma, corDim, zmdDim, scratch); // wait for the device to finish status = hipDeviceSynchronize(); // if something went wrong if (status != hipSuccess) { // form the error description std::string description = hipGetErrorName(status); // make a channel pyre::journal::error_t channel("ampcor.cuda"); // complain channel << pyre::journal::at(__HERE__) << "while upcasting and embedding the correlation hyper-matrix: " << description << " (" << status << ")" << pyre::journal::endl; // bail throw std::runtime_error(description); } // all done return scratch; } // implementations __global__ static void _r2c(const float * gamma, std::size_t corDim, std::size_t zmdDim, hipComplex * scratch) { // build the workload descriptors // global // std::size_t B = gridDim.x; // number of blocks // std::size_t T = blockDim.x; // number of threads per block // auto W = B*T; // total number of workers // local std::size_t b = blockIdx.x; // my block id std::size_t t = threadIdx.x; // my thread id // auto w = b*T + t; // my worker id // if there is no work for me if (t >= corDim) { // nothing to do return; } // compute the number of cells in the correlation matrix auto corCells = corDim * corDim; // compute the number of cells in a zoomed correlation matrix auto zmdCells = zmdDim * zmdDim; // find the matrix I'm reading from and skip to my column auto src = gamma + b * corCells + t; // and the matrix I'm writing to and skip to my column auto dst = scratch + b * zmdCells + t; // transfer one whole column of {gamma} to {scratch} for (auto idx = 0; idx < corDim; ++idx) { // read the data, convert to complex, and store *dst = {*src, 0}; // update the pointers: // {src} skips {corDim} cells src += corDim; // while {dst} must skip over {zmdDim} cells; dst += zmdDim; } // all done return; } // end of file
2d05bda0a8086dd655aa82f29fe7ee8b8528b376.cu
// -*- C++ -*- // -*- coding: utf-8 -*- // // michael a.g. aïvázis <[email protected]> // parasim // (c) 1998-2019 all rights reserved // // configuration #include <portinfo> // STL #include <exception> #include <string> // cuda #include <cuda_runtime.h> // pyre #include <pyre/journal.h> // local declarations #include "kernels.h" // helpers __global__ static void _r2c(const float * gamma, std::size_t corDim, std::size_t zmdDim, cuComplex * scratch); // compute the amplitude of the signal tiles, assuming pixels are of type std::complex<float> auto ampcor::cuda::kernels:: r2c(const float * gamma, std::size_t pairs, std::size_t corDim, std::size_t zmdDim) -> cuComplex * { // constants const auto Mb = 1.0 / 1024 / 1024; // grab a spot cuComplex * scratch = nullptr; // compute the number of cells in the zoomed correlation matrix auto zmdCells = zmdDim * zmdDim; // compute the amount of memory we need auto footprint = pairs * zmdCells * sizeof(cuComplex); // allocate memory for the complex zoomed version auto status = cudaMallocManaged(&scratch, footprint); // if something went wrong if (status != cudaSuccess) { // make a channel pyre::journal::error_t error("ampcor.cuda"); // complain error << pyre::journal::at(__HERE__) << "while allocating " << footprint * Mb << " Mb of device memory for the zoomed correlation matrix" << cudaGetErrorName(status) << " (" << status << ")" << pyre::journal::endl; // and bail throw std::bad_alloc(); } // initialize the memory status = cudaMemset(scratch, 0, footprint); // if something went wrong if (status != cudaSuccess) { // get the error description std::string description = cudaGetErrorName(status); // make a channel pyre::journal::error_t error("ampcor.cuda"); // complain error << pyre::journal::at(__HERE__) << "while initializing " << footprint * Mb << " Mb of device memory for the zoomed correlation matrix: " << description << " (" << status << ")" << pyre::journal::endl; // and bail throw std::runtime_error(description); } // we will launch enough blocks auto B = pairs; // with enough threads auto T = 32 * (corDim / 32 + (corDim % 32) ? 
1 : 0); // make a channel pyre::journal::debug_t channel("ampcor.cuda"); // show me channel << pyre::journal::at(__HERE__) << "launching " << B << " blocks of " << T << " threads each to process " << corDim << " columns of the correlation hyper-matrix" << pyre::journal::endl; // launch _r2c <<<B,T>>> (gamma, corDim, zmdDim, scratch); // wait for the device to finish status = cudaDeviceSynchronize(); // if something went wrong if (status != cudaSuccess) { // form the error description std::string description = cudaGetErrorName(status); // make a channel pyre::journal::error_t channel("ampcor.cuda"); // complain channel << pyre::journal::at(__HERE__) << "while upcasting and embedding the correlation hyper-matrix: " << description << " (" << status << ")" << pyre::journal::endl; // bail throw std::runtime_error(description); } // all done return scratch; } // implementations __global__ static void _r2c(const float * gamma, std::size_t corDim, std::size_t zmdDim, cuComplex * scratch) { // build the workload descriptors // global // std::size_t B = gridDim.x; // number of blocks // std::size_t T = blockDim.x; // number of threads per block // auto W = B*T; // total number of workers // local std::size_t b = blockIdx.x; // my block id std::size_t t = threadIdx.x; // my thread id // auto w = b*T + t; // my worker id // if there is no work for me if (t >= corDim) { // nothing to do return; } // compute the number of cells in the correlation matrix auto corCells = corDim * corDim; // compute the number of cells in a zoomed correlation matrix auto zmdCells = zmdDim * zmdDim; // find the matrix I'm reading from and skip to my column auto src = gamma + b * corCells + t; // and the matrix I'm writing to and skip to my column auto dst = scratch + b * zmdCells + t; // transfer one whole column of {gamma} to {scratch} for (auto idx = 0; idx < corDim; ++idx) { // read the data, convert to complex, and store *dst = {*src, 0}; // update the pointers: // {src} skips {corDim} cells src += corDim; // while {dst} must skip over {zmdDim} cells; dst += zmdDim; } // all done return; } // end of file
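In both copies of this file, the thread count is computed as auto T = 32 * (corDim / 32 + (corDim % 32) ? 1 : 0); because ?: binds more loosely than +, the whole sum becomes the condition and T collapses to 32 whenever corDim > 0, even though the kernel expects at least one thread per column. A sketch of the presumably intended round-up (an assumption about intent, not the authors' fix):

// Presumed intent: round corDim up to the next multiple of the 32-thread warp.
auto T = 32 * (corDim / 32 + ((corDim % 32) ? 1 : 0));
// equivalently: auto T = 32 * ((corDim + 31) / 32);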
d7b0e6f10c1262472a7ec44f5351a667d164ff4c.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdio>   // fprintf
#include "matrix.h"

template<typename T> void FreeDeviceMemory(T* d_ptr){ hipFree(d_ptr); }
template void FreeDeviceMemory(float* d_ptr);
template void FreeDeviceMemory(double* d_ptr);
template void FreeDeviceMemory(int* d_ptr);

template<typename T> int Vec<T>::AllocVectorToDevice(){
    if(d_val) hipFree(d_val);
    if(hipMalloc((void**)&d_val,m*sizeof(T)) == hipSuccess){ return 0; }
    else{ return -1; }
}
template int Vec<float>::AllocVectorToDevice();
template int Vec<double>::AllocVectorToDevice();

template<typename T> int Vec<T>::SetVectorValueToDevice(){
    if(hipMemcpy(d_val,val,m*sizeof(T),hipMemcpyHostToDevice) == hipSuccess){ return 0; }
    return -1;
}
template int Vec<float>::SetVectorValueToDevice();
template int Vec<double>::SetVectorValueToDevice();

template<typename T> int Vec<T>::GetVectorValueFromDevice(){
    if(hipMemcpy(val,d_val,m*sizeof(T),hipMemcpyDeviceToHost) == hipSuccess){ return 0; }
    return -1;
}
template int Vec<float>::GetVectorValueFromDevice();
template int Vec<double>::GetVectorValueFromDevice();

template<typename T> int CSR<T>::CopyMatToDevice(){
    hipError_t err;
    int nnz = rowptr[m];
    if(d_val) hipFree(d_val);
    if(d_rowptr) hipFree(d_rowptr);
    if(d_colind) hipFree(d_colind);
    err = hipMalloc((void**)&d_val,nnz*sizeof(T));       if(err != hipSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = hipMalloc((void**)&d_colind,nnz*sizeof(int));  if(err != hipSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = hipMalloc((void**)&d_rowptr,(m+1)*sizeof(int)); if(err != hipSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = hipMemcpy(d_val,val,nnz*sizeof(T),hipMemcpyHostToDevice);          if(err != hipSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    err = hipMemcpy(d_colind,colind,nnz*sizeof(int),hipMemcpyHostToDevice);  if(err != hipSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    err = hipMemcpy(d_rowptr,rowptr,(m+1)*sizeof(int),hipMemcpyHostToDevice); if(err != hipSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    return 0;
}
template int CSR<float>::CopyMatToDevice();
template int CSR<double>::CopyMatToDevice();

template<typename T> int ELL<T>::CopyMatToDevice(){
    hipError_t err;
    if(d_val) hipFree(d_val);
    if(d_colind) hipFree(d_colind);
    err = hipMalloc((void**)&d_val,m*k*sizeof(T));      if(err != hipSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = hipMalloc((void**)&d_colind,m*k*sizeof(int)); if(err != hipSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = hipMemcpy(d_val,val,m*k*sizeof(T),hipMemcpyHostToDevice); if(err != hipSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    // colind is an int index array (d_colind is allocated with sizeof(int) above),
    // so copy m*k*sizeof(int) bytes; the original copied m*k*sizeof(T), which
    // overruns the device buffer whenever sizeof(T) > sizeof(int).
    err = hipMemcpy(d_colind,colind,m*k*sizeof(int),hipMemcpyHostToDevice); if(err != hipSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    return 0;
}
template int ELL<float>::CopyMatToDevice();
template int ELL<double>::CopyMatToDevice();

template<typename T> int COO<T>::CopyMatToDevice(){
    hipError_t err;
    if(d_val) hipFree(d_val);
    if(d_colind) hipFree(d_colind);
    if(d_rowind) hipFree(d_rowind);
    err = hipMalloc((void**)&d_val,nnz*sizeof(T)); if(err != hipSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    // colind/rowind are int index arrays (as in the CSR and ELL paths), so size
    // their device buffers and copies with sizeof(int); the original used sizeof(T).
    err = hipMalloc((void**)&d_colind,nnz*sizeof(int)); if(err != hipSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    err = hipMalloc((void**)&d_rowind,nnz*sizeof(int)); if(err != hipSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    err = hipMemcpy(d_val,val,nnz*sizeof(T),hipMemcpyHostToDevice);           if(err != hipSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    err = hipMemcpy(d_colind,colind,nnz*sizeof(int),hipMemcpyHostToDevice);   if(err != hipSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    err = hipMemcpy(d_rowind,rowind,nnz*sizeof(int),hipMemcpyHostToDevice);   if(err != hipSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    return 0;
}
template int COO<float>::CopyMatToDevice();
template int COO<double>::CopyMatToDevice();
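// --- Illustrative sketch, not part of the original file ----------------------
// CopyMatToDevice() above reads nnz = rowptr[m] and mirrors the three CSR
// arrays onto the device. For reference, a minimal host-side CSR encoding of
// the 3x3 matrix [[10,0,20],[0,30,0],[40,0,50]], assuming the member layout
// used above (m rows, rowptr of length m+1, colind/val of length nnz); the
// example_* names are ours, not from matrix.h:
static const int    example_m         = 3;
static const int    example_rowptr[4] = {0, 2, 3, 5};          // row i spans [rowptr[i], rowptr[i+1])
static const int    example_colind[5] = {0, 2, 1, 0, 2};       // column of each nonzero
static const double example_val[5]    = {10, 20, 30, 40, 50};  // nonzero values, row by row
// Here nnz == example_rowptr[example_m] == 5, which is exactly the element
// count that CopyMatToDevice() allocates and copies for val and colind.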
d7b0e6f10c1262472a7ec44f5351a667d164ff4c.cu
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdio>   // fprintf
#include "matrix.h"

template<typename T> void FreeDeviceMemory(T* d_ptr){ cudaFree(d_ptr); }
template void FreeDeviceMemory(float* d_ptr);
template void FreeDeviceMemory(double* d_ptr);
template void FreeDeviceMemory(int* d_ptr);

template<typename T> int Vec<T>::AllocVectorToDevice(){
    if(d_val) cudaFree(d_val);
    if(cudaMalloc((void**)&d_val,m*sizeof(T)) == cudaSuccess){ return 0; }
    else{ return -1; }
}
template int Vec<float>::AllocVectorToDevice();
template int Vec<double>::AllocVectorToDevice();

template<typename T> int Vec<T>::SetVectorValueToDevice(){
    if(cudaMemcpy(d_val,val,m*sizeof(T),cudaMemcpyHostToDevice) == cudaSuccess){ return 0; }
    return -1;
}
template int Vec<float>::SetVectorValueToDevice();
template int Vec<double>::SetVectorValueToDevice();

template<typename T> int Vec<T>::GetVectorValueFromDevice(){
    if(cudaMemcpy(val,d_val,m*sizeof(T),cudaMemcpyDeviceToHost) == cudaSuccess){ return 0; }
    return -1;
}
template int Vec<float>::GetVectorValueFromDevice();
template int Vec<double>::GetVectorValueFromDevice();

template<typename T> int CSR<T>::CopyMatToDevice(){
    cudaError_t err;
    int nnz = rowptr[m];
    if(d_val) cudaFree(d_val);
    if(d_rowptr) cudaFree(d_rowptr);
    if(d_colind) cudaFree(d_colind);
    err = cudaMalloc((void**)&d_val,nnz*sizeof(T));        if(err != cudaSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = cudaMalloc((void**)&d_colind,nnz*sizeof(int));   if(err != cudaSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = cudaMalloc((void**)&d_rowptr,(m+1)*sizeof(int)); if(err != cudaSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = cudaMemcpy(d_val,val,nnz*sizeof(T),cudaMemcpyHostToDevice);           if(err != cudaSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    err = cudaMemcpy(d_colind,colind,nnz*sizeof(int),cudaMemcpyHostToDevice);   if(err != cudaSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    err = cudaMemcpy(d_rowptr,rowptr,(m+1)*sizeof(int),cudaMemcpyHostToDevice); if(err != cudaSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    return 0;
}
template int CSR<float>::CopyMatToDevice();
template int CSR<double>::CopyMatToDevice();

template<typename T> int ELL<T>::CopyMatToDevice(){
    cudaError_t err;
    if(d_val) cudaFree(d_val);
    if(d_colind) cudaFree(d_colind);
    err = cudaMalloc((void**)&d_val,m*k*sizeof(T));      if(err != cudaSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = cudaMalloc((void**)&d_colind,m*k*sizeof(int)); if(err != cudaSuccess){ fprintf(stderr,"fail to malloc on GPU\n"); return -1; }
    err = cudaMemcpy(d_val,val,m*k*sizeof(T),cudaMemcpyHostToDevice); if(err != cudaSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    // colind is an int index array (d_colind is allocated with sizeof(int) above),
    // so copy m*k*sizeof(int) bytes; the original copied m*k*sizeof(T).
    err = cudaMemcpy(d_colind,colind,m*k*sizeof(int),cudaMemcpyHostToDevice); if(err != cudaSuccess){ fprintf(stderr,"fail to memcpy to GPU\n"); return -1; }
    return 0;
}
template int ELL<float>::CopyMatToDevice();
template int ELL<double>::CopyMatToDevice();

template<typename T> int COO<T>::CopyMatToDevice(){
    cudaError_t err;
    if(d_val) cudaFree(d_val);
    if(d_colind) cudaFree(d_colind);
    if(d_rowind) cudaFree(d_rowind);
    err = cudaMalloc((void**)&d_val,nnz*sizeof(T)); if(err != cudaSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    // colind/rowind are int index arrays (as in the CSR and ELL paths), so size
    // their device buffers and copies with sizeof(int); the original used sizeof(T).
    err = cudaMalloc((void**)&d_colind,nnz*sizeof(int)); if(err != cudaSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    err = cudaMalloc((void**)&d_rowind,nnz*sizeof(int)); if(err != cudaSuccess){ fprintf(stderr,"fail at malloc on GPU\n"); return -1; }
    err = cudaMemcpy(d_val,val,nnz*sizeof(T),cudaMemcpyHostToDevice);          if(err != cudaSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    err = cudaMemcpy(d_colind,colind,nnz*sizeof(int),cudaMemcpyHostToDevice);  if(err != cudaSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    err = cudaMemcpy(d_rowind,rowind,nnz*sizeof(int),cudaMemcpyHostToDevice);  if(err != cudaSuccess){ fprintf(stderr,"fail at memcpy to GPU\n"); return -1; }
    return 0;
}
template int COO<float>::CopyMatToDevice();
template int COO<double>::CopyMatToDevice();
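// --- Illustrative sketch, not part of the original file ----------------------
// The methods above all follow the same pattern: cudaMalloc + cudaMemcpy with
// the status collapsed to 0 / -1. The same pattern as a standalone round-trip
// helper (the function name is ours, not from matrix.h; it reuses the includes
// of the file above). Note the element size passed to cudaMemcpy must match the
// array's element type, which is exactly the colind/rowind fix noted above.
template <typename T>
int RoundTripThroughDevice(const T* host_in, T* host_out, size_t n) {
    T* d_buf = nullptr;
    if (cudaMalloc((void**)&d_buf, n * sizeof(T)) != cudaSuccess) {
        fprintf(stderr, "fail to malloc on GPU\n"); return -1;
    }
    if (cudaMemcpy(d_buf, host_in, n * sizeof(T), cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "fail to memcpy to GPU\n"); cudaFree(d_buf); return -1;
    }
    if (cudaMemcpy(host_out, d_buf, n * sizeof(T), cudaMemcpyDeviceToHost) != cudaSuccess) {
        fprintf(stderr, "fail to memcpy from GPU\n"); cudaFree(d_buf); return -1;
    }
    cudaFree(d_buf);
    return 0;
}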
7ff34280836263fe0f1c8a3703bd697fa3c72708.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <iostream> #include <cmath> #include "float32.h" #define IN_DATA_BYTES (IN_SIZE*sizeof(dtype)) #define OUT_DATA_BYTES (OUT_SIZE*sizeof(dtype)) //function to print out error message from cuDNN calls #define checkCUDNN(exp) \ { \ cudnnStatus_t status = (exp); \ if(status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } int main() { cudnnHandle_t cudnn; checkCUDNN(cudnnCreate(&cudnn)); cudnnPoolingDescriptor_t pooling_desc; //create descriptor handle checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_desc)); std::cout << "cudnnCreatePoolingDescriptor is ok...\n"; //initialize descriptor const int poolDims = 2; int windowDimA[poolDims] = {2, 2}; int paddingA[poolDims] = {0, 0}; int strideA[poolDims] = {2, 2}; checkCUDNN(cudnnSetPoolingNdDescriptor(pooling_desc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, poolDims, windowDimA, paddingA, strideA)); std::cout << "cudnnSetPooling2dDescriptor is ok...\n"; cudnnTensorDescriptor_t in_desc; //create input data tensor descriptor checkCUDNN(cudnnCreateTensorDescriptor(&in_desc)); std::cout << "cudnnCreateTensorDescriptor is ok...\n"; //initialize input data descriptor checkCUDNN(cudnnSetTensor4dDescriptor(in_desc, //descriptor handle CUDNN_TENSOR_NCHW, //data format CUDNN_DTYPE, //data type (precision) 1, //number of images 20, //number of channels 24, //data height 24)); //data width std::cout << "cudnnSetTensor4dDescriptor is ok...\n"; cudnnTensorDescriptor_t out_desc; //create output data tensor descriptor checkCUDNN(cudnnCreateTensorDescriptor(&out_desc)); std::cout << "cudnnCreateTensorDescriptor is ok...\n"; //initialize output data descriptor checkCUDNN(cudnnSetTensor4dDescriptor(out_desc, //descriptor handle CUDNN_TENSOR_NCHW, //data format CUDNN_DTYPE, //data type (precision) 1, //number of images 20, //number of channels 12, //data height 12)); //data width std::cout << "cudnnSetTensor4dDescriptor is ok...\n"; stype alpha = 1.0f; stype beta = 0.0f; //GPU data pointers dtype *in_data, *out_data; //allocate arrays on GPU hipMalloc(&in_data, IN_DATA_BYTES); hipMalloc(&out_data, OUT_DATA_BYTES); //copy input data to GPU array hipMemcpy(in_data, input, IN_DATA_BYTES, hipMemcpyHostToDevice); //initize output data on GPU hipMemset(out_data, 0, OUT_DATA_BYTES); //Call pooling operator checkCUDNN(cudnnPoolingForward(cudnn, //cuDNN context handle pooling_desc, //pooling descriptor handle &alpha, //alpha scaling factor in_desc, //input tensor descriptor in_data, //input data pointer to GPU memory &beta, //beta scaling factor out_desc, //output tensor descriptor out_data)); //output data pointer from GPU memory std::cout << "cudnnPoolingForward is ok...\n"; //allocate array on CPU for output tensor data dtype *result = (dtype *) malloc(OUT_DATA_BYTES); //copy output data from GPU hipMemcpy(result, out_data, OUT_DATA_BYTES, hipMemcpyDeviceToHost); //loop over and check that the forward pass outputs match expected results (exactly) int err = 0; for (int i = 0; i < OUT_SIZE; i++) { if (result[i] != output[i]) { std::cout << "Error! 
Expected " << output[i] << " got " << result[i] << " for idx " << i << std::endl; err++; } } std::cout << "Forward finished with " << err << " errors" << std::endl; //free CPU arrays free(result); //free GPU arrays hipFree(in_data); hipFree(out_data); //free cuDNN descriptors cudnnDestroyTensorDescriptor(in_desc); cudnnDestroyTensorDescriptor(out_desc); cudnnDestroyPoolingDescriptor(pooling_desc); cudnnDestroy(cudnn); return 0; }
7ff34280836263fe0f1c8a3703bd697fa3c72708.cu
#include <cudnn.h> #include <stdio.h> #include <iostream> #include <cmath> #include "float32.h" #define IN_DATA_BYTES (IN_SIZE*sizeof(dtype)) #define OUT_DATA_BYTES (OUT_SIZE*sizeof(dtype)) //function to print out error message from cuDNN calls #define checkCUDNN(exp) \ { \ cudnnStatus_t status = (exp); \ if(status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } int main() { cudnnHandle_t cudnn; checkCUDNN(cudnnCreate(&cudnn)); cudnnPoolingDescriptor_t pooling_desc; //create descriptor handle checkCUDNN(cudnnCreatePoolingDescriptor(&pooling_desc)); std::cout << "cudnnCreatePoolingDescriptor is ok...\n"; //initialize descriptor const int poolDims = 2; int windowDimA[poolDims] = {2, 2}; int paddingA[poolDims] = {0, 0}; int strideA[poolDims] = {2, 2}; checkCUDNN(cudnnSetPoolingNdDescriptor(pooling_desc, CUDNN_POOLING_MAX, CUDNN_PROPAGATE_NAN, poolDims, windowDimA, paddingA, strideA)); std::cout << "cudnnSetPooling2dDescriptor is ok...\n"; cudnnTensorDescriptor_t in_desc; //create input data tensor descriptor checkCUDNN(cudnnCreateTensorDescriptor(&in_desc)); std::cout << "cudnnCreateTensorDescriptor is ok...\n"; //initialize input data descriptor checkCUDNN(cudnnSetTensor4dDescriptor(in_desc, //descriptor handle CUDNN_TENSOR_NCHW, //data format CUDNN_DTYPE, //data type (precision) 1, //number of images 20, //number of channels 24, //data height 24)); //data width std::cout << "cudnnSetTensor4dDescriptor is ok...\n"; cudnnTensorDescriptor_t out_desc; //create output data tensor descriptor checkCUDNN(cudnnCreateTensorDescriptor(&out_desc)); std::cout << "cudnnCreateTensorDescriptor is ok...\n"; //initialize output data descriptor checkCUDNN(cudnnSetTensor4dDescriptor(out_desc, //descriptor handle CUDNN_TENSOR_NCHW, //data format CUDNN_DTYPE, //data type (precision) 1, //number of images 20, //number of channels 12, //data height 12)); //data width std::cout << "cudnnSetTensor4dDescriptor is ok...\n"; stype alpha = 1.0f; stype beta = 0.0f; //GPU data pointers dtype *in_data, *out_data; //allocate arrays on GPU cudaMalloc(&in_data, IN_DATA_BYTES); cudaMalloc(&out_data, OUT_DATA_BYTES); //copy input data to GPU array cudaMemcpy(in_data, input, IN_DATA_BYTES, cudaMemcpyHostToDevice); //initize output data on GPU cudaMemset(out_data, 0, OUT_DATA_BYTES); //Call pooling operator checkCUDNN(cudnnPoolingForward(cudnn, //cuDNN context handle pooling_desc, //pooling descriptor handle &alpha, //alpha scaling factor in_desc, //input tensor descriptor in_data, //input data pointer to GPU memory &beta, //beta scaling factor out_desc, //output tensor descriptor out_data)); //output data pointer from GPU memory std::cout << "cudnnPoolingForward is ok...\n"; //allocate array on CPU for output tensor data dtype *result = (dtype *) malloc(OUT_DATA_BYTES); //copy output data from GPU cudaMemcpy(result, out_data, OUT_DATA_BYTES, cudaMemcpyDeviceToHost); //loop over and check that the forward pass outputs match expected results (exactly) int err = 0; for (int i = 0; i < OUT_SIZE; i++) { if (result[i] != output[i]) { std::cout << "Error! 
Expected " << output[i] << " got " << result[i] << " for idx " << i << std::endl; err++; } } std::cout << "Forward finished with " << err << " errors" << std::endl; //free CPU arrays free(result); //free GPU arrays cudaFree(in_data); cudaFree(out_data); //free cuDNN descriptors cudnnDestroyTensorDescriptor(in_desc); cudnnDestroyTensorDescriptor(out_desc); cudnnDestroyPoolingDescriptor(pooling_desc); cudnnDestroy(cudnn); return 0; }
084ae8edc186d24779af5250ba745839dc88d86e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void RoIPool3dFForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int length, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, l, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int l = (index / pooled_width / pooled_height) % length; int c = (index / pooled_width / pooled_height / length) % channels; int n = index / pooled_width / pooled_height / length / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_bottom_data = bottom_data + ((roi_batch_ind * channels + c) * length + l) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPool3dFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int length, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, l, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int l = (index / pooled_width / pooled_height) % length; int c = (index / pooled_width / pooled_height / length) % channels; int n = index / pooled_width / pooled_height / length / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = ((roi_batch_ind * channels + c) * length + l) * height * width; int top_offset = ((n * channels + c) * length + l) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph * pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> ROIPool3d_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto length = input.size(2); auto height = input.size(3); auto width = input.size(4); auto output = at::empty({num_rois, channels, length, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * length * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, length, pooled_height, pooled_width}, input.options().dtype(at::kInt)); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv(output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type().scalarType(), "ROIPool3d_forward", [&] { hipLaunchKernelGGL(( RoIPool3dFForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, length, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(hipGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIPool3d_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int 
batch_size, const int channels, const int length, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, length, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv(grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type().scalarType(), "ROIPool3d_backward", [&] { hipLaunchKernelGGL(( RoIPool3dFBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, length, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
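// --- Illustrative sketch, not part of the original file ----------------------
// Both kernels above recover (n, c, l, ph, pw) from a flat thread index over a
// tensor laid out as [num_rois, channels, length, pooled_height, pooled_width].
// The same arithmetic on the host, for reference (the struct and function
// names are ours):
struct Pooled3dIndex { int n, c, l, ph, pw; };

static inline Pooled3dIndex DecodePooled3dIndex(int index, int channels, int length,
                                                int pooled_height, int pooled_width) {
    Pooled3dIndex out;
    out.pw = index % pooled_width;
    out.ph = (index / pooled_width) % pooled_height;
    out.l  = (index / pooled_width / pooled_height) % length;
    out.c  = (index / pooled_width / pooled_height / length) % channels;
    out.n  = index / pooled_width / pooled_height / length / channels;
    return out;
}
// Inverting it gives back the flat offset used for top_data / argmax_data:
// index == (((n * channels + c) * length + l) * pooled_height + ph) * pooled_width + pw.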
084ae8edc186d24779af5250ba745839dc88d86e.cu
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void RoIPool3dFForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int length, const int height, const int width, const int pooled_height, const int pooled_width, const T* bottom_rois, T* top_data, int* argmax_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, l, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int l = (index / pooled_width / pooled_height) % length; int c = (index / pooled_width / pooled_height / length) % channels; int n = index / pooled_width / pooled_height / length / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int roi_start_w = round(offset_bottom_rois[1] * spatial_scale); int roi_start_h = round(offset_bottom_rois[2] * spatial_scale); int roi_end_w = round(offset_bottom_rois[3] * spatial_scale); int roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero T maxval = is_empty ? 
0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; const T* offset_bottom_data = bottom_data + ((roi_batch_ind * channels + c) * length + l) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int bottom_index = h * width + w; if (offset_bottom_data[bottom_index] > maxval) { maxval = offset_bottom_data[bottom_index]; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = maxidx; } } template <typename T> __global__ void RoIPool3dFBackward(const int nthreads, const T* top_diff, const int* argmax_data, const int num_rois, const T spatial_scale, const int channels, const int length, const int height, const int width, const int pooled_height, const int pooled_width, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, l, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int l = (index / pooled_width / pooled_height) % length; int c = (index / pooled_width / pooled_height / length) % channels; int n = index / pooled_width / pooled_height / length / channels; const T* offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; int bottom_offset = ((roi_batch_ind * channels + c) * length + l) * height * width; int top_offset = ((n * channels + c) * length + l) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; T* offset_bottom_diff = bottom_diff + bottom_offset; const int* offset_argmax_data = argmax_data + top_offset; int argmax = offset_argmax_data[ph * pooled_width + pw]; if (argmax != -1) { atomicAdd( offset_bottom_diff + argmax, static_cast<T>(offset_top_diff[ph * pooled_width + pw])); } } } std::tuple<at::Tensor, at::Tensor> ROIPool3d_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto length = input.size(2); auto height = input.size(3); auto width = input.size(4); auto output = at::empty({num_rois, channels, length, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * length * pooled_height * pooled_width * channels; auto argmax = at::zeros({num_rois, channels, length, pooled_height, pooled_width}, input.options().dtype(at::kInt)); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv(output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } AT_DISPATCH_FLOATING_TYPES(input.type().scalarType(), "ROIPool3d_forward", [&] { RoIPool3dFForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, length, height, width, pooled_height, pooled_width, rois.contiguous().data<scalar_t>(), output.data<scalar_t>(), argmax.data<int>()); }); THCudaCheck(cudaGetLastError()); return std::make_tuple(output, argmax); } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor ROIPool3d_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const 
int length, const int height, const int width) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); // TODO add more checks auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, length, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv(grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type().scalarType(), "ROIPool3d_backward", [&] { RoIPool3dFBackward<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), argmax.data<int>(), num_rois, spatial_scale, channels, length, height, width, pooled_height, pooled_width, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
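// --- Illustrative sketch, not part of the original file ----------------------
// The launches above cap the grid at 4096 blocks of 512 threads and rely on
// CUDA_1D_KERNEL_LOOP's grid stride (blockDim.x * gridDim.x) to cover any
// remaining elements. The ceil-division-plus-clamp done via THCCeilDiv and
// std::min, spelled out (the helper name is ours):
static inline long ClampedGridSize(long total, long block = 512L, long max_blocks = 4096L) {
    long blocks = (total + block - 1) / block;            // ceil(total / block)
    if (blocks < 1) blocks = 1;
    return blocks < max_blocks ? blocks : max_blocks;     // never exceed 4096 blocks
}
// With total > 4096 * 512 the grid stays at 4096 and each thread simply
// processes more than one element inside the kernel loop.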
3549eaa93d277742ee4b818133f46638e40b13f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ __global__ void subsample(hipLaunchParm lp, float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) float the_weight = weight[k]; // Initialize to the bias float the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = the_weight*sum + the_bias; } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ __global__ void subgradweight(hipLaunchParm lp, float *input, float *gradOutput, float *gradWeight, float *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = hipBlockDim_x*hipThreadIdx_y + hipThreadIdx_x; // create array to hold partial sums __shared__ float sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((hipThreadIdx_x == 0) && (hipThreadIdx_y == 0)) { for(int i = 0; i < hipBlockDim_x*hipBlockDim_y; i++) gradWeight[k] += scale*sums[i]; } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(hipBlockDim_x*hipBlockDim_y)) { sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((hipThreadIdx_x == 0) && (hipThreadIdx_y == 0)) { for (int i=0; i<(hipBlockDim_x*hipBlockDim_y); i++) gradBias[k] += scale*sums[i]; } } 
/* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinput(hipLaunchParm lp, float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinputAtomic(hipLaunchParm lp, float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } void THNN_CudaSpatialSubSampling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, int kW, int kH, int dW, int dH) { float *weight_data = THCudaTensor_data(state, weight); float *bias_data = THCudaTensor_data(state, bias); float *output_data; float *input_data; int nInputPlane = THCudaTensor_size(state, weight, 0); THCUNN_assertSameGPU(state, 4, input, output, weight, bias); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); 
THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernel(HIP_KERNEL_NAME(subsample), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernel(HIP_KERNEL_NAME(subsample), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(hipGetLastError()); } // clean THCudaTensor_free(state, input); } void THNN_CudaSpatialSubSampling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, int kW, int kH, int dW, int dH) { THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput); int nInputPlane = THCudaTensor_size(state, weight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernel(HIP_KERNEL_NAME(subgradinput), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernel(HIP_KERNEL_NAME(subgradinputAtomic), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernel(HIP_KERNEL_NAME(subgradinput), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernel(HIP_KERNEL_NAME(subgradinputAtomic), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } THCudaCheck(hipGetLastError()); } } void THNN_CudaSpatialSubSampling_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, int kW, int kH, int dW, int dH, float scale) { THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias); int nInputPlane = THCudaTensor_size(state, gradWeight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel hipLaunchKernel(HIP_KERNEL_NAME(subgradweight), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, gradOutput_data, gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel long sl; for (sl=0; sl<nbatch; sl++) { hipLaunchKernel(HIP_KERNEL_NAME(subgradweight), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data + sl*input->stride[0], gradOutput_data + sl*gradOutput->stride[0], gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } THCudaCheck(hipGetLastError()); } // clean THCudaTensor_free(state, input); } #undef CUDA_MAX_THREADS
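// --- Illustrative sketch, not part of the original file ----------------------
// THNN_CudaSpatialSubSampling_updateGradInput above launches the plain
// subgradinput kernel only when kH <= dH && kW <= dW: with non-overlapping
// windows each gradInput element is written by at most one output pixel, so an
// ordinary add is race-free; overlapping windows can scatter from several
// threads into the same element and need the Atomic variant. The two
// accumulation styles and the overlap test in isolation (these names are ours):
static inline bool SubsamplingWindowsOverlap(int kW, int kH, int dW, int dH) {
    return kW > dW || kH > dH;   // stride smaller than kernel: neighbouring windows share input pixels
}
__device__ inline void accumulate_plain(float* dst, float z)  { *dst += z; }          // unique writer
__device__ inline void accumulate_atomic(float* dst, float z) { atomicAdd(dst, z); }  // possibly shared writer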
3549eaa93d277742ee4b818133f46638e40b13f8.cu
#include "hip/hip_runtime.h" #include "THCUNN.h" #include "common.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function subsamples an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output, 1D weight, 1D bias */ __global__ void subsample(hipLaunchParm lp, float *input, float *output, float *weight, float *bias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // Get the good mask for (k,i) (k out, i in) float the_weight = weight[k]; // Initialize to the bias float the_bias = bias[k]; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = the_weight*sum + the_bias; } } } /* * Description: * this function computes the gradWeight from input and gradOutput */ __global__ void subgradweight(hipLaunchParm lp, float *input, float *gradOutput, float *gradWeight, float *gradBias, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW, float scale) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; input = input + i*input_w*input_h; // thread ID int tid = hipBlockDim_x*hipThreadIdx_y + hipThreadIdx_x; // create array to hold partial sums __shared__ float sums[CUDA_MAX_THREADS]; sums[tid] = 0; // compute partial sums for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; long kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { sums[tid] += z * ptr_input[kx]; } ptr_input += input_w; } } } __syncthreads(); // reduce: accumulate all partial sums to produce final gradWeight if ((hipThreadIdx_x == 0) && (hipThreadIdx_y == 0)) { for(int i = 0; i < hipBlockDim_x*hipBlockDim_y; i++) gradWeight[k] += scale*sums[i]; } __syncthreads(); // compute gradBias sums[tid] = 0; for (int i=tid; i<output_w*output_h; i+=(hipBlockDim_x*hipBlockDim_y)) { sums[tid] += gradOutput[i]; } __syncthreads(); // reduce gradBias if ((hipThreadIdx_x == 0) && (hipThreadIdx_y == 0)) { for (int i=0; i<(hipBlockDim_x*hipBlockDim_y); i++) gradBias[k] += scale*sums[i]; } } /* * Description: * this function computes the gradInput 
from weight and gradOutput */ __global__ void subgradinput(hipLaunchParm lp, float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z; ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ __global__ void subgradinputAtomic(hipLaunchParm lp, float *gradInput, float *gradOutput, float *weight, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = hipBlockIdx_x; int i = o; int k = hipBlockIdx_x % input_n; int xx_start = hipThreadIdx_x; int xx_end = output_w; int xx_step = hipBlockDim_x; int yy_start = hipBlockDim_y*hipBlockIdx_y + hipThreadIdx_y; int yy_end = output_h; int yy_step = hipBlockDim_y*hipGridDim_y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // get weight float the_weight = weight[k]; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput * the_weight; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z); } ptr_gradInput += input_w; } } } } void THNN_CudaSpatialSubSampling_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *weight, THCudaTensor *bias, int kW, int kH, int dW, int dH) { float *weight_data = THCudaTensor_data(state, weight); float *bias_data = THCudaTensor_data(state, bias); float *output_data; float *input_data; int nInputPlane = THCudaTensor_size(state, weight, 0); THCUNN_assertSameGPU(state, 4, input, output, weight, bias); THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, 
nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernel(HIP_KERNEL_NAME(subsample), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes"); THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernel(HIP_KERNEL_NAME(subsample), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, output_data, weight_data, bias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); THCudaCheck(hipGetLastError()); } // clean THCudaTensor_free(state, input); } void THNN_CudaSpatialSubSampling_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *weight, int kW, int kH, int dW, int dH) { THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput); int nInputPlane = THCudaTensor_size(state, weight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernel(HIP_KERNEL_NAME(subgradinput), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernel(HIP_KERNEL_NAME(subgradinputAtomic), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *weight_data = THCudaTensor_data(state, weight); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH <= dH && kW <= dW) { hipLaunchKernel(HIP_KERNEL_NAME(subgradinput), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernel(HIP_KERNEL_NAME(subgradinputAtomic), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), gradInput_data, gradOutput_data, weight_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } THCudaCheck(hipGetLastError()); } } void THNN_CudaSpatialSubSampling_accGradParameters(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradWeight, THCudaTensor *gradBias, int kW, int kH, int dW, int dH, float scale) { THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias); int nInputPlane = THCudaTensor_size(state, gradWeight, 0); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel hipLaunchKernel(HIP_KERNEL_NAME(subgradweight), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data, gradOutput_data, gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); THCudaCheck(hipGetLastError()); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; float *gradWeight_data = THCudaTensor_data(state, gradWeight); float *gradBias_data = THCudaTensor_data(state, gradBias); float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *input_data; input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); // cuda blocks & threads: dim3 blocks(nInputPlane); dim3 threads(32,8); // run gradweight kernel long sl; for (sl=0; sl<nbatch; sl++) { hipLaunchKernel(HIP_KERNEL_NAME(subgradweight), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), input_data + sl*input->stride[0], gradOutput_data + sl*gradOutput->stride[0], gradWeight_data, gradBias_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale); } THCudaCheck(hipGetLastError()); } // clean THCudaTensor_free(state, input); } #undef CUDA_MAX_THREADS
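// --- Illustrative sketch, not part of the original files ---------------------
// What one plane of the subsample kernel computes, as a plain CPU reference
// (the function name is ours): every kH x kW window taken with stride (dH, dW)
// is summed, scaled by that plane's single weight and offset by its bias,
// producing an output of ((h - kH)/dH + 1) x ((w - kW)/dW + 1) values.
static void SubsamplePlaneCPU(const float* in, float* out, float weight, float bias,
                              int input_h, int input_w, int kH, int kW, int dH, int dW) {
    int output_h = (input_h - kH) / dH + 1;
    int output_w = (input_w - kW) / dW + 1;
    for (int oy = 0; oy < output_h; ++oy) {
        for (int ox = 0; ox < output_w; ++ox) {
            float sum = 0.f;
            for (int ky = 0; ky < kH; ++ky)
                for (int kx = 0; kx < kW; ++kx)
                    sum += in[(oy * dH + ky) * input_w + (ox * dW + kx)];
            out[oy * output_w + ox] = weight * sum + bias;   // matches the_weight*sum + the_bias
        }
    }
}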
083bc879b61f28b2486b6b85d40eceead9bd6de1.hip
// !!! This is a file automatically generated by hipify!!! // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/random.h> #include "rasterizeKernels.h" #include "rasterizeTools.h" glm::vec3* framebuffer; fragment* depthbuffer; float* device_vbo; float* device_cbo; int* device_ibo; float* device_nbo; triangle* primitives; glm::vec3* device_tbo; hipEvent_t start, stop; float timeDuration; void checkCUDAError(const char *msg) { hipError_t err = hipGetLastError(); if( hipSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } __host__ __device__ glm::vec3 findTextureColor(glm::vec3 point, glm::vec3 * texColors, tex theTex){ //int id = theTex.id; //int size = theTex.h * theTex.w; //printf("point: %.2f, %.2f, %.2f\n", point.r, point.g, point.b); glm::vec3 result; float u,v; if( fabs( point.x - 0.1f ) < 0.002f){ u = point.y + 0.5f; v = point.z + 0.5f; } else if( fabs( point.y - 0.1f )< 0.002f){ u = point.x + 0.5f; v = point.z + 0.5f; } else if( fabs( point.z - 0.1f ) < 0.002f){ u = point.x + 0.5f; v = point.y + 0.5f; } //int idx = ( (int) v * theTex.w )* theTex.h + (int)u * theTex.h; //result.r = texColors[idx].r/255.0f; //result.g = texColors[idx].g/255.0f; //result.b = texColors[idx].b/255.0f; //printf("texture color: %.2f, %.2f, %.2f\n", result.r, result.g, result.b); return result; } __host__ __device__ glm::vec3 getTextureColor(glm::vec3 &point, triangle &thePoly, glm::vec3 * texColors, tex &theTex){ glm::vec3 result(0.0f, 0.0f, 0.0f); triangle newPoly = thePoly; //Shift XY coordinate system (+0.5, +0.5) to match the subpixeling technique newPoly.p0 += glm::vec3(0.5f, 0.5f, 0.0f); newPoly.p1 += glm::vec3(0.5f, 0.5f, 0.0f); newPoly.p2 += glm::vec3(0.5f, 0.5f, 0.0f); //Calculate alternative 1/Z, U/Z and V/Z values which 
will be interpolated newPoly.uv0 /= newPoly.p0.z; newPoly.uv1 /= newPoly.p1.z; newPoly.uv2 /= newPoly.p2.z; // Sort the vertices in ascending Y order glm::vec3 tempf; #define swapPoint(m, n) tempf = m; m = n; n = tempf; if (newPoly.p0.y > newPoly.p1.y) //swap p0 and p1 swapPoint(newPoly.p0, newPoly.p1); if (newPoly.p0.y > newPoly.p2.y) //swap p0 and p2 swapPoint(newPoly.p0, newPoly.p2); if (newPoly.p1.y > newPoly.p2.y) //swap p1 and p2 swapPoint(newPoly.p1, newPoly.p2); #undef swapPoint float x0 = newPoly.p0.x; float y0 = newPoly.p0.y; float z0 = newPoly.p0.z; float x1 = newPoly.p1.x; float y1 = newPoly.p1.y; float z1 = newPoly.p1.z; float x2 = newPoly.p2.x; float y2 = newPoly.p2.y; float z2 = newPoly.p2.z; float y0i = y0; float y1i = y1; float y2i = y2; printf("swappded points: %.2f, %.2f, %.2f", y0, y1, y2); if ((y0i == y1i && y0i == y2i) || ((int) x0 == (int) x1 && (int) x0 == (int) x2)) return result; // Calculate horizontal and vertical increments for UV axes float denom = ((x2 - x0) * (y1 - y0) - (x1 - x0) * (y2 - y0)); if (!denom) // Skip if it's an infinitely thin line return result; denom = 1 / denom; glm::vec3 duv_dx, duv_dy; // ( d(1/z), d(u/z), d(v/z) ) duv_dx.x = ((newPoly.uv2.x - newPoly.uv0.x) * (y1 - y0) - (newPoly.uv1.x - newPoly.uv0.x) * (y2 - y0)) * denom; duv_dx.y = ((newPoly.uv2.y - newPoly.uv0.y) * (y1 - y0) - (newPoly.uv1.y - newPoly.uv0.y) * (y2 - y0)) * denom; duv_dx.z = ((newPoly.uv2.z - newPoly.uv0.z) * (y1 - y0) - (newPoly.uv1.z - newPoly.uv0.z) * (y2 - y0)) * denom; duv_dy.x = ((newPoly.uv2.x - newPoly.uv0.x) * (x2 - x0) - (newPoly.uv2.x - newPoly.uv0.x) * (x1 - x0)) * denom; duv_dy.y = ((newPoly.uv2.y - newPoly.uv0.y) * (x2 - x0) - (newPoly.uv2.y - newPoly.uv0.y) * (x1 - x0)) * denom; duv_dy.z = ((newPoly.uv2.z - newPoly.uv0.z) * (x2 - x0) - (newPoly.uv2.z - newPoly.uv0.z) * (x1 - x0)) * denom; // Calculate X-slopes along the edges float dx_dy0, dx_dy1, dx_dy2; if (y1 > y0) dx_dy0 = (x1 - x0) / (y1 - y0); if (y2 > y0) dx_dy1 = (x2 - x0) / (y2 - y0); if (y2 > y1) dx_dy2 = (x2 - x1) / (y2 - y1); // Determine which side of the poly the longer edge is on int side = dx_dy1 > dx_dy0; if (y0 == y1) side = x0 > x1; if (y1 == y2) side = x2 > x1; return result; } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } //TODO: Implement a vertex shader __global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, cudaMat4 M_mvp, cudaMat4 M_mv_prime){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ //Transform incoming vertex position from model to clip coordinates glm::vec3 pModel(vbo[index*3], vbo[index*3 + 1],vbo[index*3 + 2]); glm::vec3 pClip = multiplyMV(M_mvp, glm::vec4(pModel, 1.0f)); //Transform normal into clip coordinates glm::vec3 nModel(nbo[index*3], nbo[index*3 + 1],nbo[index*3 + 2]); /*glm::vec3 nTip_OS = pModel + nModel; glm::vec3 nTip_WS = multiplyMV(theCam.M_mvp, glm::vec4(nTip_OS, 1.0f)); glm::vec3 nClip = glm::normalize(nTip_WS - pClip);*/ // glm::vec3 nClip = glm::normalize( multiplyMV(theCam.M_mvp, glm::vec4(nModel, 0.0f))); glm::vec3 nClip = glm::normalize( multiplyMV(M_mv_prime, glm::vec4(nModel, 0.0f))); vbo[index*3] = pClip.x; vbo[index*3 + 1] = pClip.y; vbo[index*3 + 2] = pClip.z; nbo[index*3] = nClip.x; nbo[index*3 + 1] = nClip.y; nbo[index*3 + 2] = nClip.z; } } //TODO: Implement primative assembly __global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, triangle* primitives, int SHADING){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int primitivesCount = ibosize/3; if(index<primitivesCount){ //get indice number int i0 = ibo[index*3]; int i1 = ibo[index*3+1]; int i2 = ibo[index*3+2]; //assemble primitive points primitives[index].p0 = glm::vec3(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]); primitives[index].p1 = glm::vec3(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]); primitives[index].p2 = glm::vec3(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]); //assemble primitive colors if(SHADING == 5){ //original cbo primitives[index].c0 = glm::vec3(cbo[0], cbo[1], cbo[2]); primitives[index].c1 = glm::vec3(cbo[3], cbo[4], cbo[5]); primitives[index].c2 = glm::vec3(cbo[6], cbo[7], cbo[8]); } else{ primitives[index].c0 = glm::vec3(1,1,1); primitives[index].c1 = glm::vec3(1,1,1); primitives[index].c2 = glm::vec3(1,1,1); } //assemble primitive normals; glm::vec3 n0 = glm::vec3(nbo[i0*3], nbo[i0*3+1], nbo[i0*3+2]); glm::vec3 n1 = glm::vec3(nbo[i1*3], nbo[i1*3+1], nbo[i1*3+2]); glm::vec3 n2 = glm::vec3(nbo[i2*3], nbo[i2*3+1], nbo[i2*3+2]); primitives[index].n = (n0 + n1 + n2)/3.0f; } } //TODO: Implement a rasterization method, such as scanline. 
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, glm::vec3* texColor, int texSize, tex theTex, int LINE, int SHADING){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ triangle tri = primitives[index]; if(tri.n.z <0){ // back facing triangles return; } //get bounding box for this triangle glm::vec3 minpoint, maxpoint; // in the screen coordinates, floats getAABBForTriangle(tri, minpoint, maxpoint); glm::vec2 minPoint, maxPoint; //in image coodinates, ints minPoint = screenToImage( glm::vec2(minpoint.x, minpoint.y), resolution); //viewport transform maxPoint = screenToImage( glm::vec2(maxpoint.x, maxpoint.y), resolution); int xMin = (int)floor(minPoint.x); int xMax = (int)ceil(maxPoint.x); int yMin = (int)floor(maxPoint.y); int yMax = (int)ceil(minPoint.y); //printf("min = %.2f, %.2f; max = %.2f, %.2f\n", minpoint.x, minpoint.y, maxpoint.x, maxpoint.y); // printf("min = %.2f, %.2f; max = %.2f, %.2f\n", minPoint.x, minPoint.y, maxPoint.x, maxPoint.y); // clipping xMin = ( xMin > 0.0f )? xMin : 0.0f; yMin = ( yMin > 0.0f) ? yMin : 0.0f; xMax = ( xMax < resolution.x -1) ? xMax : resolution.x -1; yMax = ( yMax < resolution.y -1) ? yMax : resolution.y -1; if(xMin<0 || yMin<0 || xMin>=resolution.x || yMin>=resolution.y) return; if(xMax<0 || yMax<0 || xMax>=resolution.x || yMax>=resolution.y) return; //scanline approach for(int y = yMin; y < yMax; y++){ //top to down for(int x = xMin; x < xMax; x++){ //left to right int pixelID = x + resolution.x*y; glm::vec2 screenCoord = imageToScreen(glm::vec2(x,y),resolution); //perspective transformation glm::vec3 b = calculateBarycentricCoordinate(tri, screenCoord); //barycentric coordinate for (x,y) pixel if(isBarycentricCoordInBounds(b)){ //p is in the triangle bounds float z = getZAtCoordinate(b, tri); //depth if(Z_TEST == 1){ //do the depth test with atomic function // while(atomicCAS(&depthbuffer[pixelID].tested, 1, 0) != 1); //until current fragment is tested } if(z > depthbuffer[pixelID].position.z && z <= 1.0f){ fragment frag = depthbuffer[pixelID]; frag.color = interpolateColor(b,tri); /*frag.position = interpolatePosition(b,tri); frag.position.z = z;*/ glm::vec3 point(screenCoord.x, screenCoord.y, z); frag.position = point; frag.normal = tri.n; if(LINE == 1){ //shade line color glm::vec3 lineColor(0.0f,0.0f,1.0f); //blue glm::vec3 p = interpolatePosition(b,tri); if(fabs(glm::dot(glm::normalize(tri.p0 - p), glm::normalize(tri.p0 - tri.p1))-1.0f)<0.0001f|| fabs(glm::dot(glm::normalize(tri.p1 - p), glm::normalize(tri.p1 - tri.p2))-1.0f)<0.0001f || fabs(glm::dot(glm::normalize(tri.p2 - p), glm::normalize(tri.p2 - tri.p0))-1.0f)<0.0001f ){ frag.color = lineColor; frag.normal = glm::vec3(0.0f, 0.0f, 1.0f); } } if(SHADING == 4){ // perspectively correct texture map //http://www.lysator.liu.se/~mikaelk/doc/perspectivetexture/ // http://chrishecker.com/Miscellaneous_Technical_Articles //glm::vec3 p = multiplyMV(M_mvp_inverse, glm::vec4(depthbuffer[index].position, 1.0f)); // glm::vec3 p1 = multiplyMV(M_mvp_inverse, glm::vec4(primitives[index].p1, 1.0f)); // glm::vec3 p2 = multiplyMV(M_mvp_inverse, glm::vec4(primitives[index].p2, 1.0f)); //primitives[index].c0 = findTextureColor(p0, texColor, theTex); //primitives[index].c1 = findTextureColor(p1, texColor, theTex); // primitives[index].c2 = findTextureColor(p2, texColor, theTex); } depthbuffer[pixelID] = frag; } // atomicExch(&depthbuffer[pixelID].tested, 1); } } } primitives[index] = tri; //update } } 
// display points __global__ void rasterizationPointsKernel(float* vbo, int vbosize, float * nbo, int nbosize, fragment* depthbuffer, glm::vec2 resolution){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ //find the point glm::vec3 point(vbo[index*3], vbo[index*3+1], vbo[index*3+2]); glm::vec3 normal(nbo[index*3], nbo[index*3+1], nbo[index*3+2]); if(normal.z < 0) return; //locate the pixel glm::vec2 pixel = screenToImage( glm::vec2(point.x, point.y), resolution); //viewport transform if(pixel.x<0 || pixel.y<0 || pixel.x>=resolution.x || pixel.y>=resolution.y) return; int pixelID = pixel.x + pixel.y * resolution.x; //shade the point representation if(point.z > depthbuffer[pixelID].position.z ){ glm::vec3 pointColor(1.0f, 1.0f, 0.0f); //yellow /* depthbuffer[pixelID].position = point; depthbuffer[pixelID].color = pointColor; depthbuffer[pixelID].normal = glm::vec3(0.0f, 0.0f, 1.0f);*/ for(int i=pixel.x-1; i<=pixel.x+1; i++){ for(int j=pixel.y-1; j<=pixel.y+1; j++){ if(i<0 || j<0 || i>=resolution.x || j>=resolution.y) return; int newpixelID = i + j * resolution.x; depthbuffer[newpixelID].position = point; depthbuffer[newpixelID].color = pointColor; depthbuffer[newpixelID].normal = glm::vec3(0.0f, 0.0f, 1.0f); // atomicExch(&depthbuffer[pixelID].tested, 1); } } } } } //TODO: Implement a fragment shader __global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, int SHADING){ //set up light glm::vec3 lightPos(500.0f, 500.0f, 1000.0f); //add a light in the scene for shading int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].normal.z<0) return; if(SHADING == 0){ //shade by normal depthbuffer[index].color.r = glm::clamp(depthbuffer[index].normal.x, 0.0f, 1.0f); depthbuffer[index].color.g = glm::clamp(depthbuffer[index].normal.y, 0.0f, 1.0f); depthbuffer[index].color.b = glm::clamp(depthbuffer[index].normal.z, 0.0f, 1.0f); } else if(SHADING == 1){ //shade by depth depthbuffer[index].color.r = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); depthbuffer[index].color.g = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); depthbuffer[index].color.b = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); } else if(SHADING == 2){ //diffuse shade glm::vec3 lightDir = glm::normalize(lightPos - depthbuffer[index].position); float cosTerm = glm::clamp(glm::dot(lightDir, depthbuffer[index].normal), 0.0f, 1.0f); depthbuffer[index].color = glm::clamp(cosTerm * depthbuffer[index].color, 0.0f, 1.0f); } else if (SHADING == 3){ //blinn-phong shade float coeff = 5.0f; glm::vec3 lightDir = glm::normalize(lightPos - depthbuffer[index].position); float cosTerm = glm::clamp(glm::dot(lightDir, depthbuffer[index].normal), 0.0f, 1.0f); depthbuffer[index].color = glm::clamp( ::pow(cosTerm,coeff) * depthbuffer[index].color, 0.0f, 1.0f); } else{ depthbuffer[index].color =glm::clamp(depthbuffer[index].color, 0.0f, 1.0f); } } } //Writes fragment colors to the framebuffer __global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ framebuffer[index] = depthbuffer[index].color; } } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void 
cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float * nbo, int nbosize){ //set up camera, // cam theCam = mouseCam; //cuda timer event hipEventCreate(&start); hipEventCreate(&stop); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; hipMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; hipMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0,0,0); frag.normal = glm::vec3(0,0,0); frag.position = glm::vec3(0,0,-10000); hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer,frag); //------------------------------ //memory stuff //------------------------------ hipEventRecord( start, 0 ); primitives = NULL; hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); device_ibo = NULL; hipMalloc((void**)&device_ibo, ibosize*sizeof(int)); hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice); device_vbo = NULL; hipMalloc((void**)&device_vbo, vbosize*sizeof(float)); hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice); device_cbo = NULL; hipMalloc((void**)&device_cbo, cbosize*sizeof(float)); hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice); device_nbo = NULL; hipMalloc((void**)&device_nbo, nbosize*sizeof(float)); hipMemcpy( device_nbo, nbo, nbosize*sizeof(float), hipMemcpyHostToDevice); int tbosize = 0; device_tbo = NULL; if( SHADING_MODE == 5 && textureColor.size()!=0 ){ //texture map!!! 
//establish color vector tbosize = textureColor.size(); glm::vec3 * tbo = new glm::vec3[tbosize]; for(int i=0; i< tbosize; i++){ tbo[i] = textureColor[i]; } hipMalloc((void**)&device_tbo, tbosize*sizeof(glm::vec3)); hipMemcpy( device_tbo, tbo, tbosize*sizeof(glm::vec3), hipMemcpyHostToDevice); delete tbo; } tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize)); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for set up memory : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //vertex shader //------------------------------ hipEventRecord( start, 0 ); hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_nbo, nbosize, mouseCam.M_mvp, mouseCam.M_mv_prime); hipDeviceSynchronize(); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for vertex shader : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //primitive assembly //------------------------------ hipEventRecord( start, 0 ); primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, device_nbo, nbosize, primitives, SHADING_MODE); hipDeviceSynchronize(); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for primitive assembly : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //rasterization //------------------------------ hipEventRecord( start, 0 ); hipLaunchKernelGGL(( rasterizationKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, ibosize/3, depthbuffer, resolution, device_tbo, tbosize, textureMap, LINE_RASTER, SHADING_MODE); hipDeviceSynchronize(); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for rasterization : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //fragment shader //------------------------------ hipEventRecord( start, 0 ); hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, depthbuffer, resolution, SHADING_MODE); hipDeviceSynchronize(); hipEventRecord( stop, 0 ); hipEventSynchronize( stop ); hipEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for fragment shader : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //point raster shader //------------------------------ if(POINT_RASTER ==1){ hipEventRecord( start, 0 ); 
primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
    hipLaunchKernelGGL(( rasterizationPointsKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_nbo, nbosize, depthbuffer, resolution); //render points out
    hipDeviceSynchronize();

    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );
    hipEventElapsedTime( &timeDuration, start, stop );
    if(PERFORMANCE_MEASURE == 1){
      printf("\n\n*****************************************************\n");
      printf("Time Taken for point raster : %f ms\n",timeDuration);
      printf("*****************************************************\n");
    }
  }

  //------------------------------
  //write fragments to framebuffer
  //------------------------------
  hipEventRecord( start, 0 );

  hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer, framebuffer);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, resolution, framebuffer);
  hipDeviceSynchronize();

  hipEventRecord( stop, 0 );
  hipEventSynchronize( stop );
  hipEventElapsedTime( &timeDuration, start, stop );
  if(PERFORMANCE_MEASURE == 1){
    printf("\n\n*****************************************************\n");
    printf("Time Taken for render : %f ms\n",timeDuration);
    printf("*****************************************************\n");
  }

  kernelCleanup();
  checkCUDAError("Kernel failed!");
}

void kernelCleanup(){
  hipFree( primitives );
  hipFree( device_vbo );
  hipFree( device_cbo );
  hipFree( device_ibo );
  hipFree( device_nbo );
  hipFree( device_tbo );   // still NULL when no texture map was uploaded; freeing NULL is a no-op
  hipFree( framebuffer );
  hipFree( depthbuffer );
}
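// --- Illustrative sketch (not part of the original file) ---------------------
// The depth test in rasterizationKernel above is racy: two triangles covering
// the same pixel can interleave their read-compare-write on the depth buffer,
// which is what the commented-out atomicCAS/atomicExch lines hint at. Below is
// a minimal per-pixel spin lock in that spirit. The Candidate struct, the
// separate zero-initialized `locks` array and the float3 color buffer are
// assumptions made to keep the sketch self-contained; the real code keeps the
// whole fragment in one struct instead.
struct Candidate { int pixelID; float z; float r, g, b; };

__global__ void resolveDepthWithLocks(float* depth, float3* color, int* locks,
                                      const Candidate* cands, int numCands){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i >= numCands) return;
  Candidate c = cands[i];

  bool done = false;
  while(!done){
    if(atomicCAS(&locks[c.pixelID], 0, 1) == 0){          // try to acquire
      if(c.z > depth[c.pixelID] && c.z <= 1.0f){          // same test as the kernel above
        depth[c.pixelID] = c.z;
        color[c.pixelID] = make_float3(c.r, c.g, c.b);    // payload write is now race free
      }
      __threadfence();                                    // publish writes before unlocking
      atomicExch(&locks[c.pixelID], 0);                   // release
      done = true;
    }
  }
  // The acquire-inside-the-loop form is the usual way to avoid the classic
  // intra-warp deadlock of a bare spin loop. Per-pixel locking is still costly;
  // when only the depth value needs to be exact, encoding it as an integer and
  // using atomicMin/atomicMax is a common alternative.
}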
083bc879b61f28b2486b6b85d40eceead9bd6de1.cu
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania // Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania #include <stdio.h> #include <cuda.h> #include <cmath> #include <thrust/random.h> #include "rasterizeKernels.h" #include "rasterizeTools.h" glm::vec3* framebuffer; fragment* depthbuffer; float* device_vbo; float* device_cbo; int* device_ibo; float* device_nbo; triangle* primitives; glm::vec3* device_tbo; cudaEvent_t start, stop; float timeDuration; void checkCUDAError(const char *msg) { cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) ); exit(EXIT_FAILURE); } } //Handy dandy little hashing function that provides seeds for random number generation __host__ __device__ unsigned int hash(unsigned int a){ a = (a+0x7ed55d16) + (a<<12); a = (a^0xc761c23c) ^ (a>>19); a = (a+0x165667b1) + (a<<5); a = (a+0xd3a2646c) ^ (a<<9); a = (a+0xfd7046c5) + (a<<3); a = (a^0xb55a4f09) ^ (a>>16); return a; } //Writes a given fragment to a fragment buffer at a given location __host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; depthbuffer[index] = frag; } } //Reads a fragment from a given location in a fragment buffer __host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return depthbuffer[index]; }else{ fragment f; return f; } } //Writes a given pixel to a pixel buffer at a given location __host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; framebuffer[index] = value; } } //Reads a pixel from a pixel buffer at a given location __host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){ if(x<resolution.x && y<resolution.y){ int index = (y*resolution.x) + x; return framebuffer[index]; }else{ return glm::vec3(0,0,0); } } __host__ __device__ glm::vec3 findTextureColor(glm::vec3 point, glm::vec3 * texColors, tex theTex){ //int id = theTex.id; //int size = theTex.h * theTex.w; //printf("point: %.2f, %.2f, %.2f\n", point.r, point.g, point.b); glm::vec3 result; float u,v; if( fabs( point.x - 0.1f ) < 0.002f){ u = point.y + 0.5f; v = point.z + 0.5f; } else if( fabs( point.y - 0.1f )< 0.002f){ u = point.x + 0.5f; v = point.z + 0.5f; } else if( fabs( point.z - 0.1f ) < 0.002f){ u = point.x + 0.5f; v = point.y + 0.5f; } //int idx = ( (int) v * theTex.w )* theTex.h + (int)u * theTex.h; //result.r = texColors[idx].r/255.0f; //result.g = texColors[idx].g/255.0f; //result.b = texColors[idx].b/255.0f; //printf("texture color: %.2f, %.2f, %.2f\n", result.r, result.g, result.b); return result; } __host__ __device__ glm::vec3 getTextureColor(glm::vec3 &point, triangle &thePoly, glm::vec3 * texColors, tex &theTex){ glm::vec3 result(0.0f, 0.0f, 0.0f); triangle newPoly = thePoly; //Shift XY coordinate system (+0.5, +0.5) to match the subpixeling technique newPoly.p0 += glm::vec3(0.5f, 0.5f, 0.0f); newPoly.p1 += glm::vec3(0.5f, 0.5f, 0.0f); newPoly.p2 += glm::vec3(0.5f, 0.5f, 0.0f); //Calculate alternative 1/Z, U/Z and V/Z values which will be interpolated newPoly.uv0 /= newPoly.p0.z; newPoly.uv1 /= 
newPoly.p1.z; newPoly.uv2 /= newPoly.p2.z; // Sort the vertices in ascending Y order glm::vec3 tempf; #define swapPoint(m, n) tempf = m; m = n; n = tempf; if (newPoly.p0.y > newPoly.p1.y) //swap p0 and p1 swapPoint(newPoly.p0, newPoly.p1); if (newPoly.p0.y > newPoly.p2.y) //swap p0 and p2 swapPoint(newPoly.p0, newPoly.p2); if (newPoly.p1.y > newPoly.p2.y) //swap p1 and p2 swapPoint(newPoly.p1, newPoly.p2); #undef swapPoint float x0 = newPoly.p0.x; float y0 = newPoly.p0.y; float z0 = newPoly.p0.z; float x1 = newPoly.p1.x; float y1 = newPoly.p1.y; float z1 = newPoly.p1.z; float x2 = newPoly.p2.x; float y2 = newPoly.p2.y; float z2 = newPoly.p2.z; float y0i = y0; float y1i = y1; float y2i = y2; printf("swappded points: %.2f, %.2f, %.2f", y0, y1, y2); if ((y0i == y1i && y0i == y2i) || ((int) x0 == (int) x1 && (int) x0 == (int) x2)) return result; // Calculate horizontal and vertical increments for UV axes float denom = ((x2 - x0) * (y1 - y0) - (x1 - x0) * (y2 - y0)); if (!denom) // Skip if it's an infinitely thin line return result; denom = 1 / denom; glm::vec3 duv_dx, duv_dy; // ( d(1/z), d(u/z), d(v/z) ) duv_dx.x = ((newPoly.uv2.x - newPoly.uv0.x) * (y1 - y0) - (newPoly.uv1.x - newPoly.uv0.x) * (y2 - y0)) * denom; duv_dx.y = ((newPoly.uv2.y - newPoly.uv0.y) * (y1 - y0) - (newPoly.uv1.y - newPoly.uv0.y) * (y2 - y0)) * denom; duv_dx.z = ((newPoly.uv2.z - newPoly.uv0.z) * (y1 - y0) - (newPoly.uv1.z - newPoly.uv0.z) * (y2 - y0)) * denom; duv_dy.x = ((newPoly.uv2.x - newPoly.uv0.x) * (x2 - x0) - (newPoly.uv2.x - newPoly.uv0.x) * (x1 - x0)) * denom; duv_dy.y = ((newPoly.uv2.y - newPoly.uv0.y) * (x2 - x0) - (newPoly.uv2.y - newPoly.uv0.y) * (x1 - x0)) * denom; duv_dy.z = ((newPoly.uv2.z - newPoly.uv0.z) * (x2 - x0) - (newPoly.uv2.z - newPoly.uv0.z) * (x1 - x0)) * denom; // Calculate X-slopes along the edges float dx_dy0, dx_dy1, dx_dy2; if (y1 > y0) dx_dy0 = (x1 - x0) / (y1 - y0); if (y2 > y0) dx_dy1 = (x2 - x0) / (y2 - y0); if (y2 > y1) dx_dy2 = (x2 - x1) / (y2 - y1); // Determine which side of the poly the longer edge is on int side = dx_dy1 > dx_dy0; if (y0 == y1) side = x0 > x1; if (y1 == y2) side = x2 > x1; return result; } //Kernel that clears a given pixel buffer with a given color __global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ image[index] = color; } } //Kernel that clears a given fragment buffer with a given fragment __global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ fragment f = frag; f.position.x = x; f.position.y = y; buffer[index] = f; } } //Kernel that writes the image to the OpenGL PBO directly. 
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ glm::vec3 color; color.x = image[index].x*255.0; color.y = image[index].y*255.0; color.z = image[index].z*255.0; if(color.x>255){ color.x = 255; } if(color.y>255){ color.y = 255; } if(color.z>255){ color.z = 255; } // Each thread writes one pixel location in the texture (textel) PBOpos[index].w = 0; PBOpos[index].x = color.x; PBOpos[index].y = color.y; PBOpos[index].z = color.z; } } //TODO: Implement a vertex shader __global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, cudaMat4 M_mvp, cudaMat4 M_mv_prime){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ //Transform incoming vertex position from model to clip coordinates glm::vec3 pModel(vbo[index*3], vbo[index*3 + 1],vbo[index*3 + 2]); glm::vec3 pClip = multiplyMV(M_mvp, glm::vec4(pModel, 1.0f)); //Transform normal into clip coordinates glm::vec3 nModel(nbo[index*3], nbo[index*3 + 1],nbo[index*3 + 2]); /*glm::vec3 nTip_OS = pModel + nModel; glm::vec3 nTip_WS = multiplyMV(theCam.M_mvp, glm::vec4(nTip_OS, 1.0f)); glm::vec3 nClip = glm::normalize(nTip_WS - pClip);*/ // glm::vec3 nClip = glm::normalize( multiplyMV(theCam.M_mvp, glm::vec4(nModel, 0.0f))); glm::vec3 nClip = glm::normalize( multiplyMV(M_mv_prime, glm::vec4(nModel, 0.0f))); vbo[index*3] = pClip.x; vbo[index*3 + 1] = pClip.y; vbo[index*3 + 2] = pClip.z; nbo[index*3] = nClip.x; nbo[index*3 + 1] = nClip.y; nbo[index*3 + 2] = nClip.z; } } //TODO: Implement primative assembly __global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, triangle* primitives, int SHADING){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; int primitivesCount = ibosize/3; if(index<primitivesCount){ //get indice number int i0 = ibo[index*3]; int i1 = ibo[index*3+1]; int i2 = ibo[index*3+2]; //assemble primitive points primitives[index].p0 = glm::vec3(vbo[i0*3], vbo[i0*3+1], vbo[i0*3+2]); primitives[index].p1 = glm::vec3(vbo[i1*3], vbo[i1*3+1], vbo[i1*3+2]); primitives[index].p2 = glm::vec3(vbo[i2*3], vbo[i2*3+1], vbo[i2*3+2]); //assemble primitive colors if(SHADING == 5){ //original cbo primitives[index].c0 = glm::vec3(cbo[0], cbo[1], cbo[2]); primitives[index].c1 = glm::vec3(cbo[3], cbo[4], cbo[5]); primitives[index].c2 = glm::vec3(cbo[6], cbo[7], cbo[8]); } else{ primitives[index].c0 = glm::vec3(1,1,1); primitives[index].c1 = glm::vec3(1,1,1); primitives[index].c2 = glm::vec3(1,1,1); } //assemble primitive normals; glm::vec3 n0 = glm::vec3(nbo[i0*3], nbo[i0*3+1], nbo[i0*3+2]); glm::vec3 n1 = glm::vec3(nbo[i1*3], nbo[i1*3+1], nbo[i1*3+2]); glm::vec3 n2 = glm::vec3(nbo[i2*3], nbo[i2*3+1], nbo[i2*3+2]); primitives[index].n = (n0 + n1 + n2)/3.0f; } } //TODO: Implement a rasterization method, such as scanline. 
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, glm::vec3* texColor, int texSize, tex theTex, int LINE, int SHADING){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<primitivesCount){ triangle tri = primitives[index]; if(tri.n.z <0){ // back facing triangles return; } //get bounding box for this triangle glm::vec3 minpoint, maxpoint; // in the screen coordinates, floats getAABBForTriangle(tri, minpoint, maxpoint); glm::vec2 minPoint, maxPoint; //in image coodinates, ints minPoint = screenToImage( glm::vec2(minpoint.x, minpoint.y), resolution); //viewport transform maxPoint = screenToImage( glm::vec2(maxpoint.x, maxpoint.y), resolution); int xMin = (int)floor(minPoint.x); int xMax = (int)ceil(maxPoint.x); int yMin = (int)floor(maxPoint.y); int yMax = (int)ceil(minPoint.y); //printf("min = %.2f, %.2f; max = %.2f, %.2f\n", minpoint.x, minpoint.y, maxpoint.x, maxpoint.y); // printf("min = %.2f, %.2f; max = %.2f, %.2f\n", minPoint.x, minPoint.y, maxPoint.x, maxPoint.y); // clipping xMin = ( xMin > 0.0f )? xMin : 0.0f; yMin = ( yMin > 0.0f) ? yMin : 0.0f; xMax = ( xMax < resolution.x -1) ? xMax : resolution.x -1; yMax = ( yMax < resolution.y -1) ? yMax : resolution.y -1; if(xMin<0 || yMin<0 || xMin>=resolution.x || yMin>=resolution.y) return; if(xMax<0 || yMax<0 || xMax>=resolution.x || yMax>=resolution.y) return; //scanline approach for(int y = yMin; y < yMax; y++){ //top to down for(int x = xMin; x < xMax; x++){ //left to right int pixelID = x + resolution.x*y; glm::vec2 screenCoord = imageToScreen(glm::vec2(x,y),resolution); //perspective transformation glm::vec3 b = calculateBarycentricCoordinate(tri, screenCoord); //barycentric coordinate for (x,y) pixel if(isBarycentricCoordInBounds(b)){ //p is in the triangle bounds float z = getZAtCoordinate(b, tri); //depth if(Z_TEST == 1){ //do the depth test with atomic function // while(atomicCAS(&depthbuffer[pixelID].tested, 1, 0) != 1); //until current fragment is tested } if(z > depthbuffer[pixelID].position.z && z <= 1.0f){ fragment frag = depthbuffer[pixelID]; frag.color = interpolateColor(b,tri); /*frag.position = interpolatePosition(b,tri); frag.position.z = z;*/ glm::vec3 point(screenCoord.x, screenCoord.y, z); frag.position = point; frag.normal = tri.n; if(LINE == 1){ //shade line color glm::vec3 lineColor(0.0f,0.0f,1.0f); //blue glm::vec3 p = interpolatePosition(b,tri); if(fabs(glm::dot(glm::normalize(tri.p0 - p), glm::normalize(tri.p0 - tri.p1))-1.0f)<0.0001f|| fabs(glm::dot(glm::normalize(tri.p1 - p), glm::normalize(tri.p1 - tri.p2))-1.0f)<0.0001f || fabs(glm::dot(glm::normalize(tri.p2 - p), glm::normalize(tri.p2 - tri.p0))-1.0f)<0.0001f ){ frag.color = lineColor; frag.normal = glm::vec3(0.0f, 0.0f, 1.0f); } } if(SHADING == 4){ // perspectively correct texture map //http://www.lysator.liu.se/~mikaelk/doc/perspectivetexture/ // http://chrishecker.com/Miscellaneous_Technical_Articles //glm::vec3 p = multiplyMV(M_mvp_inverse, glm::vec4(depthbuffer[index].position, 1.0f)); // glm::vec3 p1 = multiplyMV(M_mvp_inverse, glm::vec4(primitives[index].p1, 1.0f)); // glm::vec3 p2 = multiplyMV(M_mvp_inverse, glm::vec4(primitives[index].p2, 1.0f)); //primitives[index].c0 = findTextureColor(p0, texColor, theTex); //primitives[index].c1 = findTextureColor(p1, texColor, theTex); // primitives[index].c2 = findTextureColor(p2, texColor, theTex); } depthbuffer[pixelID] = frag; } // atomicExch(&depthbuffer[pixelID].tested, 1); } } } primitives[index] = tri; //update } } 
// display points __global__ void rasterizationPointsKernel(float* vbo, int vbosize, float * nbo, int nbosize, fragment* depthbuffer, glm::vec2 resolution){ int index = (blockIdx.x * blockDim.x) + threadIdx.x; if(index<vbosize/3){ //find the point glm::vec3 point(vbo[index*3], vbo[index*3+1], vbo[index*3+2]); glm::vec3 normal(nbo[index*3], nbo[index*3+1], nbo[index*3+2]); if(normal.z < 0) return; //locate the pixel glm::vec2 pixel = screenToImage( glm::vec2(point.x, point.y), resolution); //viewport transform if(pixel.x<0 || pixel.y<0 || pixel.x>=resolution.x || pixel.y>=resolution.y) return; int pixelID = pixel.x + pixel.y * resolution.x; //shade the point representation if(point.z > depthbuffer[pixelID].position.z ){ glm::vec3 pointColor(1.0f, 1.0f, 0.0f); //yellow /* depthbuffer[pixelID].position = point; depthbuffer[pixelID].color = pointColor; depthbuffer[pixelID].normal = glm::vec3(0.0f, 0.0f, 1.0f);*/ for(int i=pixel.x-1; i<=pixel.x+1; i++){ for(int j=pixel.y-1; j<=pixel.y+1; j++){ if(i<0 || j<0 || i>=resolution.x || j>=resolution.y) return; int newpixelID = i + j * resolution.x; depthbuffer[newpixelID].position = point; depthbuffer[newpixelID].color = pointColor; depthbuffer[newpixelID].normal = glm::vec3(0.0f, 0.0f, 1.0f); // atomicExch(&depthbuffer[pixelID].tested, 1); } } } } } //TODO: Implement a fragment shader __global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, int SHADING){ //set up light glm::vec3 lightPos(500.0f, 500.0f, 1000.0f); //add a light in the scene for shading int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ if(depthbuffer[index].normal.z<0) return; if(SHADING == 0){ //shade by normal depthbuffer[index].color.r = glm::clamp(depthbuffer[index].normal.x, 0.0f, 1.0f); depthbuffer[index].color.g = glm::clamp(depthbuffer[index].normal.y, 0.0f, 1.0f); depthbuffer[index].color.b = glm::clamp(depthbuffer[index].normal.z, 0.0f, 1.0f); } else if(SHADING == 1){ //shade by depth depthbuffer[index].color.r = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); depthbuffer[index].color.g = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); depthbuffer[index].color.b = glm::clamp(depthbuffer[index].position.z/1000.0f, 0.0f, 1.0f); } else if(SHADING == 2){ //diffuse shade glm::vec3 lightDir = glm::normalize(lightPos - depthbuffer[index].position); float cosTerm = glm::clamp(glm::dot(lightDir, depthbuffer[index].normal), 0.0f, 1.0f); depthbuffer[index].color = glm::clamp(cosTerm * depthbuffer[index].color, 0.0f, 1.0f); } else if (SHADING == 3){ //blinn-phong shade float coeff = 5.0f; glm::vec3 lightDir = glm::normalize(lightPos - depthbuffer[index].position); float cosTerm = glm::clamp(glm::dot(lightDir, depthbuffer[index].normal), 0.0f, 1.0f); depthbuffer[index].color = glm::clamp( std::pow(cosTerm,coeff) * depthbuffer[index].color, 0.0f, 1.0f); } else{ depthbuffer[index].color =glm::clamp(depthbuffer[index].color, 0.0f, 1.0f); } } } //Writes fragment colors to the framebuffer __global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer){ int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; int index = x + (y * resolution.x); if(x<=resolution.x && y<=resolution.y){ framebuffer[index] = depthbuffer[index].color; } } // Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management void 
cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float * nbo, int nbosize){ //set up camera, // cam theCam = mouseCam; //cuda timer event cudaEventCreate(&start); cudaEventCreate(&stop); // set up crucial magic int tileSize = 8; dim3 threadsPerBlock(tileSize, tileSize); dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize))); //set up framebuffer framebuffer = NULL; cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3)); //set up depthbuffer depthbuffer = NULL; cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment)); //kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0)); fragment frag; frag.color = glm::vec3(0,0,0); frag.normal = glm::vec3(0,0,0); frag.position = glm::vec3(0,0,-10000); clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer,frag); //------------------------------ //memory stuff //------------------------------ cudaEventRecord( start, 0 ); primitives = NULL; cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle)); device_ibo = NULL; cudaMalloc((void**)&device_ibo, ibosize*sizeof(int)); cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice); device_vbo = NULL; cudaMalloc((void**)&device_vbo, vbosize*sizeof(float)); cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice); device_cbo = NULL; cudaMalloc((void**)&device_cbo, cbosize*sizeof(float)); cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice); device_nbo = NULL; cudaMalloc((void**)&device_nbo, nbosize*sizeof(float)); cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice); int tbosize = 0; device_tbo = NULL; if( SHADING_MODE == 5 && textureColor.size()!=0 ){ //texture map!!! 
//establish color vector tbosize = textureColor.size(); glm::vec3 * tbo = new glm::vec3[tbosize]; for(int i=0; i< tbosize; i++){ tbo[i] = textureColor[i]; } cudaMalloc((void**)&device_tbo, tbosize*sizeof(glm::vec3)); cudaMemcpy( device_tbo, tbo, tbosize*sizeof(glm::vec3), cudaMemcpyHostToDevice); delete tbo; } tileSize = 32; int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize)); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for set up memory : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //vertex shader //------------------------------ cudaEventRecord( start, 0 ); vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_nbo, nbosize, mouseCam.M_mvp, mouseCam.M_mv_prime); cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for vertex shader : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //primitive assembly //------------------------------ cudaEventRecord( start, 0 ); primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize)); primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_cbo, cbosize, device_ibo, ibosize, device_nbo, nbosize, primitives, SHADING_MODE); cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for primitive assembly : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //rasterization //------------------------------ cudaEventRecord( start, 0 ); rasterizationKernel<<<primitiveBlocks, tileSize>>>(primitives, ibosize/3, depthbuffer, resolution, device_tbo, tbosize, textureMap, LINE_RASTER, SHADING_MODE); cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for rasterization : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //fragment shader //------------------------------ cudaEventRecord( start, 0 ); fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, SHADING_MODE); cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for fragment shader : %f ms\n",timeDuration); printf("*****************************************************\n"); } //------------------------------ //point raster shader //------------------------------ if(POINT_RASTER ==1){ cudaEventRecord( start, 0 ); primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize)); rasterizationPointsKernel<<<primitiveBlocks, tileSize>>>(device_vbo, 
vbosize, device_nbo, nbosize, depthbuffer, resolution); //render point out cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for point raster : %f ms\n",timeDuration); printf("*****************************************************\n"); } } //------------------------------ //write fragments to framebuffer //------------------------------ cudaEventRecord( start, 0 ); render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer); sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer); cudaDeviceSynchronize(); cudaEventRecord( stop, 0 ); cudaEventSynchronize( stop ); cudaEventElapsedTime( &timeDuration, start, stop ); if(PERFORMANCE_MEASURE == 1){ printf("\n\n*****************************************************\n"); printf("Time Taken for render : %f ms\n",timeDuration); printf("*****************************************************\n"); } kernelCleanup(); checkCUDAError("Kernel failed!"); } void kernelCleanup(){ cudaFree( primitives ); cudaFree( device_vbo ); cudaFree( device_cbo ); cudaFree( device_ibo ); cudaFree( framebuffer ); cudaFree( depthbuffer ); }
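// --- Illustrative sketch (not part of the original file) ---------------------
// getTextureColor above stores 1/z, u/z and v/z per vertex because those
// quantities, unlike u and v themselves, vary linearly in screen space. This is
// a hedged sketch of the recovery step it is building toward, assuming
// barycentric weights b for the pixel and hypothetical per-vertex uv and z.
__device__ float2 perspectiveCorrectUV(float3 b,                 // barycentric weights
                                       float2 uv0, float2 uv1, float2 uv2,
                                       float z0, float z1, float z2){
  // Interpolate the z-divided attributes linearly in screen space...
  float invZ   = b.x / z0         + b.y / z1         + b.z / z2;
  float uOverZ = b.x * uv0.x / z0 + b.y * uv1.x / z1 + b.z * uv2.x / z2;
  float vOverZ = b.x * uv0.y / z0 + b.y * uv1.y / z1 + b.z * uv2.y / z2;
  // ...then divide by the interpolated 1/z to undo the perspective distortion.
  return make_float2(uOverZ / invZ, vOverZ / invZ);
}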
ab120e4cee1b86a955c8488f4603aab4b8a899d4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void unsafe(int *shared_var, int *values_read, int N, int iters)
{
    int i;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    if (tid >= N) return;

    int old = *shared_var;
    *shared_var = old + 1;
    values_read[tid] = old;

    for (i = 0; i < iters; i++)
    {
        int old = *shared_var;
        *shared_var = old + 1;
    }
}
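// --- Illustrative sketch (not part of the original file) ---------------------
// Race-free counterpart of the `unsafe` kernel above: atomicAdd serializes the
// read-modify-write on *shared_var, so no increments are lost. atomicAdd also
// returns the previous value, which mirrors what `unsafe` stores in values_read.
__global__ void safe(int *shared_var, int *values_read, int N, int iters)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) return;

    values_read[tid] = atomicAdd(shared_var, 1);

    for (int i = 0; i < iters; i++)
    {
        atomicAdd(shared_var, 1);
    }
}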
ab120e4cee1b86a955c8488f4603aab4b8a899d4.cu
#include "includes.h" __global__ void unsafe(int *shared_var, int *values_read, int N, int iters) { int i; int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= N) return; int old = *shared_var; *shared_var = old + 1; values_read[tid] = old; for (i = 0; i < iters; i++) { int old = *shared_var; *shared_var = old + 1; } }
4a7b8501770ee7e1990d7699af5b2b3b782918c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Zhenyu Ye * [email protected] * Eindhoven University of Technology */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_PREFETCH_H_ #define _MATRIXMUL_PREFETCH_H_ #include <stdio.h> #include "matrixMul.h" #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// __global__ void matrixMul_prefetch( float* C, float* A, float* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE * BLOCK_SIZE]; __shared__ float As2[BLOCK_SIZE * BLOCK_SIZE]; float *prefetch = As; float *prefetch2 = As2; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; float cv[BLOCK_SIZE] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * VECTOR_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; int cBegin = wB * BLOCK_SIZE * by + VECTOR_SIZE * BLOCK_SIZE * bx; // Csub is used to store the element of the block sub-matrix // that is computed by the thread // float Csub = 0; float *Ap = &A[aBegin + wA * ty +tx]; float *ap = &prefetch[ty + BLOCK_SIZE * tx]; #pragma unroll for(int i = 0; i < 16; i+=4){ ap[i] = Ap[wA * i]; } __syncthreads(); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix Ap = &A[a + aStep + wA * ty +tx]; float *ap2 = &prefetch2[ty + BLOCK_SIZE * tx]; #pragma unroll for(int i = 0; i < 16; i+=4){ ap2[i] = Ap[wA * i]; } ap = &prefetch[0]; float *bp = &B[b + BLOCK_SIZE * ty + tx]; #pragma unroll for(int i = 0; i < BLOCK_SIZE; i++){ float bv = bp[0]; cv[0] += ap[0] * bv; cv[1] += ap[1] * bv; cv[2] += ap[2] * bv; cv[3] += ap[3] * bv; cv[4] += ap[4] * bv; cv[5] += ap[5] * bv; cv[6] += ap[6] * bv; cv[7] += ap[7] * bv; cv[8] += ap[8] * bv; cv[9] += ap[9] * bv; cv[10] += ap[10] * bv; cv[11] += ap[11] * bv; cv[12] += ap[12] * bv; cv[13] += ap[13] * bv; cv[14] += ap[14] * bv; cv[15] += ap[15] * bv; ap += BLOCK_SIZE; bp += wB; } // Synchronize to make sure the matrices are loaded __syncthreads(); // swap As and As2 float *prefetch_temp = prefetch; prefetch = prefetch2; prefetch2 = prefetch_temp; } // Write the block sub-matrix to device memory; // each thread writes one element float *Cp = &C[cBegin]; Cp += BLOCK_SIZE * ty + tx; int cStep = wB; #pragma unroll for(int i=0; 
i<BLOCK_SIZE; i++){ Cp[0] = cv[i]; Cp += cStep; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
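// --- Illustrative baseline (not part of the original file) -------------------
// matrixMul_prefetch above stages tiles of A in shared memory, double-buffers
// them, and has each thread accumulate a BLOCK_SIZE-long column strip of C in
// registers. For comparison, this is the naive one-output-per-thread version;
// like the sample above it assumes the matrix dimensions are exact multiples
// of the launch geometry, so no bounds checks are performed.
__global__ void matrixMul_naive(float* C, float* A, float* B, int wA, int wB)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;   // column of C
    int row = blockIdx.y * blockDim.y + threadIdx.y;   // row of C

    float sum = 0.0f;
    for (int k = 0; k < wA; ++k) {
        // Every operand comes straight from global memory on every iteration,
        // which is exactly the traffic the prefetching kernel avoids.
        sum += A[row * wA + k] * B[k * wB + col];
    }
    C[row * wB + col] = sum;
}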
4a7b8501770ee7e1990d7699af5b2b3b782918c5.cu
/* * Zhenyu Ye * [email protected] * Eindhoven University of Technology */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_PREFETCH_H_ #define _MATRIXMUL_PREFETCH_H_ #include <stdio.h> #include "matrixMul.h" #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// __global__ void matrixMul_prefetch( float* C, float* A, float* B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE * BLOCK_SIZE]; __shared__ float As2[BLOCK_SIZE * BLOCK_SIZE]; float *prefetch = As; float *prefetch2 = As2; // Declaration of the shared memory array Bs used to // store the sub-matrix of B // __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; float cv[BLOCK_SIZE] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * VECTOR_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; int cBegin = wB * BLOCK_SIZE * by + VECTOR_SIZE * BLOCK_SIZE * bx; // Csub is used to store the element of the block sub-matrix // that is computed by the thread // float Csub = 0; float *Ap = &A[aBegin + wA * ty +tx]; float *ap = &prefetch[ty + BLOCK_SIZE * tx]; #pragma unroll for(int i = 0; i < 16; i+=4){ ap[i] = Ap[wA * i]; } __syncthreads(); // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix Ap = &A[a + aStep + wA * ty +tx]; float *ap2 = &prefetch2[ty + BLOCK_SIZE * tx]; #pragma unroll for(int i = 0; i < 16; i+=4){ ap2[i] = Ap[wA * i]; } ap = &prefetch[0]; float *bp = &B[b + BLOCK_SIZE * ty + tx]; #pragma unroll for(int i = 0; i < BLOCK_SIZE; i++){ float bv = bp[0]; cv[0] += ap[0] * bv; cv[1] += ap[1] * bv; cv[2] += ap[2] * bv; cv[3] += ap[3] * bv; cv[4] += ap[4] * bv; cv[5] += ap[5] * bv; cv[6] += ap[6] * bv; cv[7] += ap[7] * bv; cv[8] += ap[8] * bv; cv[9] += ap[9] * bv; cv[10] += ap[10] * bv; cv[11] += ap[11] * bv; cv[12] += ap[12] * bv; cv[13] += ap[13] * bv; cv[14] += ap[14] * bv; cv[15] += ap[15] * bv; ap += BLOCK_SIZE; bp += wB; } // Synchronize to make sure the matrices are loaded __syncthreads(); // swap As and As2 float *prefetch_temp = prefetch; prefetch = prefetch2; prefetch2 = prefetch_temp; } // Write the block sub-matrix to device memory; // each thread writes one element float *Cp = &C[cBegin]; Cp += BLOCK_SIZE * ty + tx; int cStep = wB; #pragma unroll for(int i=0; i<BLOCK_SIZE; i++){ Cp[0] = cv[i]; Cp += cStep; } } #endif // #ifndef _MATRIXMUL_KERNEL_H_
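// --- Illustrative sketch (not part of the original file) ---------------------
// The core trick in matrixMul_prefetch is software pipelining: while the tile
// held in one shared-memory buffer is being consumed, the next tile is loaded
// into the other buffer, and the two pointers are swapped after a single
// __syncthreads per iteration. The reduced, hypothetical kernel below shows
// just that pattern; it assumes a single block of TILE threads.
#define TILE 16

__global__ void sumTiles_doubleBuffered(const float* in, float* out, int numTiles)
{
    __shared__ float buf0[TILE];
    __shared__ float buf1[TILE];
    float* cur  = buf0;
    float* next = buf1;

    int tx = threadIdx.x;            // blockDim.x == TILE is assumed
    cur[tx] = in[tx];                // cooperatively preload tile 0
    __syncthreads();

    float acc = 0.0f;
    for (int t = 0; t < numTiles; ++t) {
        // Prefetch tile t+1 into the spare buffer while tile t is consumed.
        if (t + 1 < numTiles)
            next[tx] = in[(t + 1) * TILE + tx];

        // Consume the current tile (stand-in for the inner products above).
        for (int k = 0; k < TILE; ++k)
            acc += cur[k];

        // One barrier per iteration: all reads of cur and writes of next must
        // finish before the buffers swap roles.
        __syncthreads();
        float* tmp = cur; cur = next; next = tmp;
    }
    out[tx] = acc;
}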
43a5068e178bde7ee86e7330e3ae65b40996c3a2.hip
// !!! This is a file automatically generated by hipify!!! #ifndef CUCCL_NP_CU #define CUCCL_NP_CU #include "CUCCL_NP.cuh" #include <host_defines.h> #include <device_launch_parameters.h> #include <cmath> #include <hip/hip_runtime.h> #include <iostream> #include <iomanip> #include "../my_functions.h" namespace CUCCL { const int BLOCK = 8; __device__ int atom_MIN(int a, int b) { //atomic operation minimum if (a < b) return a; else return b; } __global__ void InitCCL(int L_d[], int width, int height) { // interger Ld[N] N = width * height int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; int id = x + y * width; L_d[id] = id; // do in parallel on the device using N threads: initialise Ld[0 . . . N 1] such that Ld[i] <-- i } __global__ void kernel4(int D_d[], int L_d[], bool* m_d, int N, int width, int height, int flthreshold) { // This is GPU kernel for 4-conectivity // m_d : examine this boolean value to determine if another iteration of the algorithm is required int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int threshold = flthreshold * INT_MAX; if (x >= width || y >= height) return; int id = x + y * width; // declare id: id <-- threadID & blockID from CUDA runtime int label = (int)(D_d[id] * INT_MAX); // label <-- L[id] int minlabel = N; // N = width * height (max possible label) // Finding minimum connected label bool up = id - width >= 0 && abs(label - (int)(D_d[id - width] * INT_MAX)) <= threshold; bool down = id + width < N && abs(label - (int)(D_d[id + width] * INT_MAX)) <= threshold; bool left = id % width && abs(label - (int)(D_d[id - 1] * INT_MAX)) <= threshold; bool right = id % width + 1 != width && abs(label - (int)(D_d[id + 1] * INT_MAX)) <= threshold; // up if (up == true) //n_id[0] minlabel = atom_MIN(minlabel, L_d[id - width]); // down if (down == true) //n_id[1] minlabel = atom_MIN(minlabel, L_d[id + width]); // left if (left) //n_id[2] minlabel = atom_MIN(minlabel, L_d[id - 1]); // right if (right) //n_id[3] minlabel = atom_MIN(minlabel, L_d[id + 1]); if (minlabel < L_d[id]) // Changes happens and another iteration of the algorithm is required { L_d[id] = minlabel; // L[id] <-- minlabel *m_d = true; } } __global__ void kernel8(int D_d[], int L_d[], bool* m_d, int N, int width, int height, int flthreshold) { // This is GPU kernel for 8-conectivity // m_d : examine this boolean value to determine if another iteration of the algorithm is required int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int threshold = flthreshold * INT_MAX; if (x >= width || y >= height) return; int id = x + y * width; // declare id: id <-- threadID & blockID from CUDA runtime int label = (int)(D_d[id] * INT_MAX); // label <-- L[id] int minlabel = N; // N = width * height (max possible label) // Finding minimum connected label bool up = id - width >= 0 && abs(label - (int)(D_d[id - width] * INT_MAX)) <= threshold; bool down = id + width < N && abs(label - (int)(D_d[id + width] * INT_MAX)) <= threshold; bool left = id % width; bool right = id % width + 1 != width; // up if (up) //n_id[0] minlabel = atom_MIN(minlabel, L_d[id - width]); // down if (down) //n_id[1] minlabel = atom_MIN(minlabel, L_d[id + width]); // left 1,2,3 if (left) { if (abs(label - (int)(D_d[id - 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id - 1]); //n_id[2] if (id - width - 1 >= 0 && abs(label - (int)(D_d[id - width - 1] * INT_MAX)) <= 
threshold) minlabel = atom_MIN(minlabel, L_d[id - width - 1]);//n_id[3] if (id + width - 1 < N && abs(label -(int)(D_d[id + width - 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + width - 1]);//n_id[4] } // right 1,2,3 if (right) { if (abs(label - (int)(D_d[id + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + 1]);//n_id[5] if (id - width + 1 >= 0 && abs(label - (int)(D_d[id - width + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id - width + 1]);//n_id[6] if (id + width + 1 < N && abs(label - (int)(D_d[id + width + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + width + 1]);//n_id[7] } if (minlabel < L_d[id]) // Changes happens and another iteration of the algorithm is required { L_d[id] = minlabel; // L[id] <-- minlabel *m_d = true; } } void CCLNPGPU::CudaCCL(int* frame, int* labels, int width, int height, int degreeOfConnectivity, int threshold) { auto N = width * height; //declare integer Ld[N] hipMalloc(reinterpret_cast<void**>(&LabelListOnDevice), sizeof(int) * N); hipMalloc(reinterpret_cast<void**>(&FrameDataOnDevice), sizeof(int) * N); hipMemcpy(FrameDataOnDevice, frame, sizeof(int) * N, hipMemcpyHostToDevice); bool* m_d; // declare boolean md in device memory hipMalloc(reinterpret_cast<void**>(&m_d), sizeof(bool)); dim3 grid((width + BLOCK - 1) / BLOCK, (height + BLOCK - 1) / BLOCK); dim3 threads(BLOCK, BLOCK); hipDeviceSynchronize(); { Timer stopwatch("Init: "); InitCCL << <grid, threads >> > (LabelListOnDevice, width, height); hipDeviceSynchronize(); } //do in parallel on the device using N threads: initialise Ld[0 . . . N 1] such that Ld[i] <-- i /* auto initLabel = static_cast<int*>(malloc(sizeof(int) * width * height)); // get initial label to print hipMemcpy(initLabel, LabelListOnDevice, sizeof(int) * width * height, hipMemcpyDeviceToHost); /*std::cout << "Init labels:" << std::endl; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j) { std::cout << std::setw(3) << initLabel[i * width + j] << " "; } std::cout << std::endl; } std::cout << std::endl; free(initLabel); */ hipDeviceSynchronize(); { while (true) { // repeat // do in parallel on the device using N threads: call Mesh Kernel A(Dd, Ld, md) // until md = false auto markFalgOnHost = false; hipMemcpy(m_d, &markFalgOnHost, sizeof(bool), hipMemcpyHostToDevice); if (degreeOfConnectivity == 4) { kernel4 << < grid, threads >> > (FrameDataOnDevice, LabelListOnDevice, m_d, N, width, height, threshold); // Mesh_Kernel_A hipDeviceSynchronize(); } else kernel8 << < grid, threads >> > (FrameDataOnDevice, LabelListOnDevice, m_d, N, width, height, threshold); //Mesh_Kernel_A hipDeviceSynchronize(); hipMemcpy(&markFalgOnHost, m_d, sizeof(bool), hipMemcpyDeviceToHost); if (markFalgOnHost) { hipDeviceSynchronize(); } else { break; } } } hipMemcpy(labels, LabelListOnDevice, sizeof(int) * N, hipMemcpyDeviceToHost); hipFree(FrameDataOnDevice); hipFree(LabelListOnDevice); } } #endif
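// --- Illustrative host-side reference (hypothetical helper, not part of the
// original file) --- the same iterative minimum-label propagation as kernel4,
// run sequentially on the CPU; useful for validating GPU labels on small
// images. The threshold logic is simplified to exact equality of pixel values.
#include <vector>
#include <algorithm>

void cclReference4(const std::vector<int>& frame, std::vector<int>& labels,
                   int width, int height)
{
    const int N = width * height;
    labels.resize(N);
    for (int i = 0; i < N; ++i) labels[i] = i;                 // L[i] <-- i

    bool changed = true;
    while (changed) {                                          // repeat until stable
        changed = false;
        for (int id = 0; id < N; ++id) {
            int x = id % width, y = id / width;
            int best = labels[id];
            // 4-neighbourhood: same connectivity checks as kernel4
            if (y > 0          && frame[id - width] == frame[id]) best = std::min(best, labels[id - width]);
            if (y < height - 1 && frame[id + width] == frame[id]) best = std::min(best, labels[id + width]);
            if (x > 0          && frame[id - 1]     == frame[id]) best = std::min(best, labels[id - 1]);
            if (x < width - 1  && frame[id + 1]     == frame[id]) best = std::min(best, labels[id + 1]);
            if (best < labels[id]) { labels[id] = best; changed = true; }
        }
    }
}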
43a5068e178bde7ee86e7330e3ae65b40996c3a2.cu
#ifndef CUCCL_NP_CU #define CUCCL_NP_CU #include "CUCCL_NP.cuh" #include <host_defines.h> #include <device_launch_parameters.h> #include <cmath> #include <cuda_runtime.h> #include <iostream> #include <iomanip> #include "../my_functions.h" namespace CUCCL { const int BLOCK = 8; __device__ int atom_MIN(int a, int b) { //atomic operation minimum if (a < b) return a; else return b; } __global__ void InitCCL(int L_d[], int width, int height) { // interger Ld[N] N = width * height int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= width || y >= height) return; int id = x + y * width; L_d[id] = id; // do in parallel on the device using N threads: initialise Ld[0 . . . N − 1] such that Ld[i] <-- i } __global__ void kernel4(int D_d[], int L_d[], bool* m_d, int N, int width, int height, int flthreshold) { // This is GPU kernel for 4-conectivity // m_d : examine this boolean value to determine if another iteration of the algorithm is required int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int threshold = flthreshold * INT_MAX; if (x >= width || y >= height) return; int id = x + y * width; // declare id: id <-- threadID & blockID from CUDA runtime int label = (int)(D_d[id] * INT_MAX); // label <-- L[id] int minlabel = N; // N = width * height (max possible label) // Finding minimum connected label bool up = id - width >= 0 && abs(label - (int)(D_d[id - width] * INT_MAX)) <= threshold; bool down = id + width < N && abs(label - (int)(D_d[id + width] * INT_MAX)) <= threshold; bool left = id % width && abs(label - (int)(D_d[id - 1] * INT_MAX)) <= threshold; bool right = id % width + 1 != width && abs(label - (int)(D_d[id + 1] * INT_MAX)) <= threshold; // up if (up == true) //n_id[0] minlabel = atom_MIN(minlabel, L_d[id - width]); // down if (down == true) //n_id[1] minlabel = atom_MIN(minlabel, L_d[id + width]); // left if (left) //n_id[2] minlabel = atom_MIN(minlabel, L_d[id - 1]); // right if (right) //n_id[3] minlabel = atom_MIN(minlabel, L_d[id + 1]); if (minlabel < L_d[id]) // Changes happens and another iteration of the algorithm is required { L_d[id] = minlabel; // L[id] <-- minlabel *m_d = true; } } __global__ void kernel8(int D_d[], int L_d[], bool* m_d, int N, int width, int height, int flthreshold) { // This is GPU kernel for 8-conectivity // m_d : examine this boolean value to determine if another iteration of the algorithm is required int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int threshold = flthreshold * INT_MAX; if (x >= width || y >= height) return; int id = x + y * width; // declare id: id <-- threadID & blockID from CUDA runtime int label = (int)(D_d[id] * INT_MAX); // label <-- L[id] int minlabel = N; // N = width * height (max possible label) // Finding minimum connected label bool up = id - width >= 0 && abs(label - (int)(D_d[id - width] * INT_MAX)) <= threshold; bool down = id + width < N && abs(label - (int)(D_d[id + width] * INT_MAX)) <= threshold; bool left = id % width; bool right = id % width + 1 != width; // up if (up) //n_id[0] minlabel = atom_MIN(minlabel, L_d[id - width]); // down if (down) //n_id[1] minlabel = atom_MIN(minlabel, L_d[id + width]); // left 1,2,3 if (left) { if (abs(label - (int)(D_d[id - 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id - 1]); //n_id[2] if (id - width - 1 >= 0 && abs(label - (int)(D_d[id - width - 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id - width - 
1]);//n_id[3] if (id + width - 1 < N && abs(label -(int)(D_d[id + width - 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + width - 1]);//n_id[4] } // right 1,2,3 if (right) { if (abs(label - (int)(D_d[id + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + 1]);//n_id[5] if (id - width + 1 >= 0 && abs(label - (int)(D_d[id - width + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id - width + 1]);//n_id[6] if (id + width + 1 < N && abs(label - (int)(D_d[id + width + 1] * INT_MAX)) <= threshold) minlabel = atom_MIN(minlabel, L_d[id + width + 1]);//n_id[7] } if (minlabel < L_d[id]) // Changes happens and another iteration of the algorithm is required { L_d[id] = minlabel; // L[id] <-- minlabel *m_d = true; } } void CCLNPGPU::CudaCCL(int* frame, int* labels, int width, int height, int degreeOfConnectivity, int threshold) { auto N = width * height; //declare integer Ld[N] cudaMalloc(reinterpret_cast<void**>(&LabelListOnDevice), sizeof(int) * N); cudaMalloc(reinterpret_cast<void**>(&FrameDataOnDevice), sizeof(int) * N); cudaMemcpy(FrameDataOnDevice, frame, sizeof(int) * N, cudaMemcpyHostToDevice); bool* m_d; // declare boolean md in device memory cudaMalloc(reinterpret_cast<void**>(&m_d), sizeof(bool)); dim3 grid((width + BLOCK - 1) / BLOCK, (height + BLOCK - 1) / BLOCK); dim3 threads(BLOCK, BLOCK); cudaDeviceSynchronize(); { Timer stopwatch("Init: "); InitCCL << <grid, threads >> > (LabelListOnDevice, width, height); cudaDeviceSynchronize(); } //do in parallel on the device using N threads: initialise Ld[0 . . . N − 1] such that Ld[i] <-- i /* auto initLabel = static_cast<int*>(malloc(sizeof(int) * width * height)); // get initial label to print cudaMemcpy(initLabel, LabelListOnDevice, sizeof(int) * width * height, cudaMemcpyDeviceToHost); /*std::cout << "Init labels:" << std::endl; for (auto i = 0; i < height; ++i) { for (auto j = 0; j < width; ++j) { std::cout << std::setw(3) << initLabel[i * width + j] << " "; } std::cout << std::endl; } std::cout << std::endl; free(initLabel); */ cudaDeviceSynchronize(); { while (true) { // repeat // do in parallel on the device using N threads: call Mesh Kernel A(Dd, Ld, md) // until md = false auto markFalgOnHost = false; cudaMemcpy(m_d, &markFalgOnHost, sizeof(bool), cudaMemcpyHostToDevice); if (degreeOfConnectivity == 4) { kernel4 << < grid, threads >> > (FrameDataOnDevice, LabelListOnDevice, m_d, N, width, height, threshold); // Mesh_Kernel_A cudaThreadSynchronize(); } else kernel8 << < grid, threads >> > (FrameDataOnDevice, LabelListOnDevice, m_d, N, width, height, threshold); //Mesh_Kernel_A cudaThreadSynchronize(); cudaMemcpy(&markFalgOnHost, m_d, sizeof(bool), cudaMemcpyDeviceToHost); if (markFalgOnHost) { cudaThreadSynchronize(); } else { break; } } } cudaMemcpy(labels, LabelListOnDevice, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaFree(FrameDataOnDevice); cudaFree(LabelListOnDevice); } } #endif
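The CUCCL_NP record above drives its label-propagation kernels from a host loop that re-launches the kernel until a device-side flag stays false. Below is a minimal, self-contained CUDA sketch of that iterate-until-no-change pattern; the 1-D neighbour rule, sizes, and names are illustrative only and are not taken from the record.

// Minimal sketch (illustrative, not the record's code): re-launch a propagation
// kernel until the device-side "changed" flag stays false.
#include <cuda_runtime.h>

__global__ void initLabels(int* labels, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) labels[i] = i;                      // Ld[i] <- i, as InitCCL does
}

__global__ void propagate(int* labels, bool* changed, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i + 1 < n && labels[i + 1] < labels[i]) {  // toy 1-D neighbour rule
        labels[i] = labels[i + 1];
        *changed = true;
    }
}

int main() {
    const int n = 1 << 10;                         // toy problem size
    int* d_labels;
    bool* d_changed;
    cudaMalloc(&d_labels, n * sizeof(int));
    cudaMalloc(&d_changed, sizeof(bool));
    initLabels<<<(n + 255) / 256, 256>>>(d_labels, n);
    bool h_changed = true;
    while (h_changed) {                            // "until md = false"
        h_changed = false;
        cudaMemcpy(d_changed, &h_changed, sizeof(bool), cudaMemcpyHostToDevice);
        propagate<<<(n + 255) / 256, 256>>>(d_labels, d_changed, n);
        cudaMemcpy(&h_changed, d_changed, sizeof(bool), cudaMemcpyDeviceToHost);
    }
    cudaFree(d_labels);
    cudaFree(d_changed);
    return 0;
}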
0d4629c827c4be54c9739300582e4ff22f34446b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/symmetric_dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SymmetricDropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index % (n/2)] > threshold) * scale; } } template <typename Dtype> void SymmetricDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count/2, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SymmetricDropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } } template <typename Dtype> __global__ void SymmetricDropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index % (n/2)] > threshold); } } template <typename Dtype> void SymmetricDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SymmetricDropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SymmetricDropoutLayer); } // namespace caffe
0d4629c827c4be54c9739300582e4ff22f34446b.cu
#include <vector> #include "caffe/layers/symmetric_dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SymmetricDropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index % (n/2)] > threshold) * scale; } } template <typename Dtype> void SymmetricDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count/2, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) SymmetricDropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } } template <typename Dtype> __global__ void SymmetricDropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index % (n/2)] > threshold); } } template <typename Dtype> void SymmetricDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) SymmetricDropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SymmetricDropoutLayer); } // namespace caffe
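In the SymmetricDropout pair above, both the CUDA and the HIP kernels index a mask of length n/2 with mask[index % (n/2)], so output element i and element i + n/2 are gated by the same random draw. A tiny host-side check of that index mapping follows; the blob size is a made-up toy value, not one from the layer.

// Host-side sketch: with n outputs and a mask of length n/2, element i and
// element i + n/2 read the same mask entry, which is what makes the dropout
// pattern symmetric across the two halves of the blob.
#include <cassert>

int main() {
    const int n = 8;                         // toy size, not from the layer
    for (int i = 0; i < n / 2; ++i) {
        int first  = i % (n / 2);            // mask index used for out[i]
        int second = (i + n / 2) % (n / 2);  // mask index used for out[i + n/2]
        assert(first == second);
    }
    return 0;
}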
0ac2d8b41a12d92fcb27274ee8d33b0ae9251383.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <time.h> #include <stdio.h> #include "opencv2/opencv.hpp" using namespace cv; using namespace std; /* Compile and run instructions: Do not use nvcc. Use the two commands below to compile. cmake . make To run the program => ./cudaProj */ #define THREADS_PER_BLOCK 512 #define N (4*4) #define TILE_DIM 32; #define BLOCK_ROWS 8; #define NUM_REPS 100; __global__ void transpose(int *a, int *b){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.x + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) a[(y+j)*width + x] = b[(y+j)*width + x]; } __global__ void transposeImage(uchar3 * const d_in, unsigned char * const d_out, uint imgheight, uint imgwidth){ const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; for (int j = 0; j < blockDim.x; j+= imgwidth){ d_out[idx*imgwidth + (idy+j)] = d_in[(idy+j)*imgwidth + idx]; } } int main(int argc, char **argv){ const int nx = 1024; const int ny = 1024; const int mem_size = nx*ny*sizeof(float); dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); float *d_adata = (float*)malloc(mem_size); float *d_bdata = (float*)malloc(mem_size); hipLaunchKernelGGL(( transpose), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata); for (int i = 0; i < NUM_REPS; i++) hipLaunchKernelGGL(( transposeNaive), dim3(dimGrid), dim3(dimBlock), 0, 0, d_tdata, d_idata); free(d_adata); free(d_bdata); Mat srcImage = imread("./e1.jpg"); const uint imgheight = srcImage.rows; const uint imgwidth = srcImage.cols; Mat inputImage(imgheight, imgwidth, CV_8UC3); Mat outputImage(imgwidth, imgheight , CV_8UC3); uchar3 *d_in; unsigned char *d_out; hipMalloc((void**)&d_in, imgheight*imgwidth*sizeof(uchar3)); hipMalloc((void**)&d_out, imgheight*imgwidth*sizeof(unsigned char)); hipMemcpy(d_in, srcImage.data, imgheight*imgwidth*sizeof(uchar3), hipMemcpyHostToDevice); dim3 threadsPerBlock(32, 32); dim3 blocksPerGrid((imgwidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgheight + threadsPerBlock.y - 1) / threadsPerBlock.y); transposeImage<< <blocksPerGrid, threadsPerBlock>> >(d_in, d_out, imgheight, imgwidth); hipMemcpy(outputImage.data, d_out, imgheight*imgwidth*sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(d_out); imwrite("transposeImage.jpg",outputImage); return 0; }
0ac2d8b41a12d92fcb27274ee8d33b0ae9251383.cu
#include <iostream> #include <time.h> #include <stdio.h> #include "opencv2/opencv.hpp" using namespace cv; using namespace std; /* Compile and run instructions: Do not use nvcc. Use the two commands below to compile. cmake . make To run the program => ./cudaProj */ #define THREADS_PER_BLOCK 512 #define N (4*4) #define TILE_DIM 32; #define BLOCK_ROWS 8; #define NUM_REPS 100; __global__ void transpose(int *a, int *b){ int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.x + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) a[(y+j)*width + x] = b[(y+j)*width + x]; } __global__ void transposeImage(uchar3 * const d_in, unsigned char * const d_out, uint imgheight, uint imgwidth){ const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y; for (int j = 0; j < blockDim.x; j+= imgwidth){ d_out[idx*imgwidth + (idy+j)] = d_in[(idy+j)*imgwidth + idx]; } } int main(int argc, char **argv){ const int nx = 1024; const int ny = 1024; const int mem_size = nx*ny*sizeof(float); dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1); dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1); float *d_adata = (float*)malloc(mem_size); float *d_bdata = (float*)malloc(mem_size); transpose<<<dimGrid, dimBlock>>>(d_tdata, d_idata); for (int i = 0; i < NUM_REPS; i++) transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata); free(d_adata); free(d_bdata); Mat srcImage = imread("./e1.jpg"); const uint imgheight = srcImage.rows; const uint imgwidth = srcImage.cols; Mat inputImage(imgheight, imgwidth, CV_8UC3); Mat outputImage(imgwidth, imgheight , CV_8UC3); uchar3 *d_in; unsigned char *d_out; cudaMalloc((void**)&d_in, imgheight*imgwidth*sizeof(uchar3)); cudaMalloc((void**)&d_out, imgheight*imgwidth*sizeof(unsigned char)); cudaMemcpy(d_in, srcImage.data, imgheight*imgwidth*sizeof(uchar3), cudaMemcpyHostToDevice); dim3 threadsPerBlock(32, 32); dim3 blocksPerGrid((imgwidth + threadsPerBlock.x - 1) / threadsPerBlock.x, (imgheight + threadsPerBlock.y - 1) / threadsPerBlock.y); transposeImage<< <blocksPerGrid, threadsPerBlock>> >(d_in, d_out, imgheight, imgwidth); cudaMemcpy(outputImage.data, d_out, imgheight*imgwidth*sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(d_out); imwrite("transposeImage.jpg",outputImage); return 0; }
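The transpose record above does not compile as written: its #define lines carry trailing semicolons (so nx/TILE_DIM expands to nx/32;), the launches reference d_tdata, d_idata, and transposeNaive, which are never declared, and the "device" buffers are allocated with malloc. Below is a minimal working sketch of the same tiled copy-kernel idea, with illustrative names and sizes rather than a fix of the record itself.

// Minimal working sketch of the tiled copy kernel the record appears to intend.
#include <cuda_runtime.h>

#define TILE_DIM 32
#define BLOCK_ROWS 8

__global__ void copyTiled(float* out, const float* in) {
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;
    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)  // each thread handles TILE_DIM/BLOCK_ROWS rows
        out[(y + j) * width + x] = in[(y + j) * width + x];
}

int main() {
    const int nx = 1024, ny = 1024;                 // illustrative sizes
    float *d_in, *d_out;
    cudaMalloc(&d_in, nx * ny * sizeof(float));
    cudaMalloc(&d_out, nx * ny * sizeof(float));
    dim3 grid(nx / TILE_DIM, ny / TILE_DIM);
    dim3 block(TILE_DIM, BLOCK_ROWS);
    copyTiled<<<grid, block>>>(d_out, d_in);
    cudaDeviceSynchronize();
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}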
3f28d5dc8a3f8d1ca1708dfd02d613fabe6d1601.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(hipGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, stream, losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(hipGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] { hipLaunchKernelGGL(( SigmoidFocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, stream, d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return d_logits; }
3f28d5dc8a3f8d1ca1708dfd02d613fabe6d1601.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This file is modified from https://github.com/pytorch/pytorch/blob/master/modules/detectron/sigmoid_focal_loss_op.cu // Cheng-Yang Fu // [email protected] #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #include <cfloat> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __global__ void SigmoidFocalLossForward(const int nthreads, const T* logits, const int* targets, const int num_classes, const float gamma, const float alpha, const int num, T* losses) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80]; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**gamma * log(p) where T term1 = powf((1. - p), gamma) * logf(max(p, FLT_MIN)); // p**gamma * log(1-p) T term2 = powf(p, gamma) * (-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))); losses[i] = 0.0; losses[i] += -c1 * term1 * zp; losses[i] += -c2 * term2 * zn; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossForward template <typename T> __global__ void SigmoidFocalLossBackward(const int nthreads, const T* logits, const int* targets, const T* d_losses, const int num_classes, const float gamma, const float alpha, const int num, T* d_logits) { CUDA_1D_KERNEL_LOOP(i, nthreads) { int n = i / num_classes; int d = i % num_classes; // current class[0~79]; int t = targets[n]; // target class [1~80], 0 is background; // Decide it is positive or negative case. T c1 = (t == (d+1)); T c2 = (t>=0 & t != (d+1)); T zn = (1.0 - alpha); T zp = (alpha); // p = 1. / 1. + expf(-x); p = sigmoid(x) T p = 1. / (1. + expf(-logits[i])); // (1-p)**g * (1 - p - g*p*log(p) T term1 = powf((1. - p), gamma) * (1. - p - (p * gamma * logf(max(p, FLT_MIN)))); // (p**g) * (g*(1-p)*log(1-p) - p) T term2 = powf(p, gamma) * ((-1. * logits[i] * (logits[i] >= 0) - logf(1. + expf(logits[i] - 2. * logits[i] * (logits[i] >= 0)))) * (1. 
- p) * gamma - p); d_logits[i] = 0.0; d_logits[i] += -c1 * term1 * zp; d_logits[i] += -c2 * term2 * zn; d_logits[i] = d_logits[i] * d_losses[i]; } // CUDA_1D_KERNEL_LOOP } // SigmoidFocalLossBackward at::Tensor SigmoidFocalLoss_forward_cuda( const at::Tensor& logits, const at::Tensor& targets, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); auto losses = at::empty({num_samples, logits.size(1)}, logits.options()); auto losses_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)losses_size, 512L), 4096L)); dim3 block(512); if (losses.numel() == 0) { THCudaCheck(cudaGetLastError()); return losses; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_forward", [&] { SigmoidFocalLossForward<scalar_t><<<grid, block, 0, stream>>>( losses_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), num_classes, gamma, alpha, num_samples, losses.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return losses; } at::Tensor SigmoidFocalLoss_backward_cuda( const at::Tensor& logits, const at::Tensor& targets, const at::Tensor& d_losses, const int num_classes, const float gamma, const float alpha) { AT_ASSERTM(logits.type().is_cuda(), "logits must be a CUDA tensor"); AT_ASSERTM(targets.type().is_cuda(), "targets must be a CUDA tensor"); AT_ASSERTM(d_losses.type().is_cuda(), "d_losses must be a CUDA tensor"); AT_ASSERTM(logits.dim() == 2, "logits should be NxClass"); const int num_samples = logits.size(0); AT_ASSERTM(logits.size(1) == num_classes, "logits.size(1) should be num_classes"); auto d_logits = at::zeros({num_samples, num_classes}, logits.options()); auto d_logits_size = num_samples * logits.size(1); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)d_logits_size, 512L), 4096L)); dim3 block(512); if (d_logits.numel() == 0) { THCudaCheck(cudaGetLastError()); return d_logits; } AT_DISPATCH_FLOATING_TYPES(logits.type(), "SigmoidFocalLoss_backward", [&] { SigmoidFocalLossBackward<scalar_t><<<grid, block, 0, stream>>>( d_logits_size, logits.contiguous().data<scalar_t>(), targets.contiguous().data<int>(), d_losses.contiguous().data<scalar_t>(), num_classes, gamma, alpha, num_samples, d_logits.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return d_logits; }
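Both versions of the focal-loss kernels above evaluate log(1 - p), with p = sigmoid(x), through the expression -x*(x>=0) - logf(1 + expf(x - 2*x*(x>=0))), which avoids computing 1 - p directly when p is close to 1. A small host-side check of that identity follows; the sample x values are arbitrary.

// Sketch: the stable form used in the kernels above equals log(1 - sigmoid(x)).
#include <cmath>
#include <cstdio>

static float stable_log1m_sigmoid(float x) {
    float s = (x >= 0.0f) ? 1.0f : 0.0f;
    return -x * s - logf(1.0f + expf(x - 2.0f * x * s));
}

int main() {
    const float xs[] = {-5.0f, -0.5f, 0.0f, 0.5f, 5.0f};  // arbitrary test points
    for (float x : xs) {
        float naive = logf(1.0f - 1.0f / (1.0f + expf(-x)));
        printf("x=%5.1f  stable=%.6f  naive=%.6f\n", x, stable_log1m_sigmoid(x), naive);
    }
    return 0;
}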
47d0c0ac16efbcb821cd83657f1845323bef6c87.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathReduce.cuh" #include "THHTensor.hpp" #include "THHStream.hpp" #include "../generic/THCTensorMathReduce.cu" #include "../THCGenerateFloatType.h"
47d0c0ac16efbcb821cd83657f1845323bef6c87.cu
#include "../THCTensorMathReduce.cuh" #include "THCTensor.hpp" #include "THCStream.hpp" #include "../generic/THCTensorMathReduce.cu" #include "../THCGenerateFloatType.h"
bc8711d939920b1b1891431da2d99128cbcf8ab2.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "MachTrans.h" #include "ParamsMt.h" #include "Global.h" #include "utils.h" #include "WeightFactory.h" #include "HostMatrix.h" #include "CorpusReader.h" #include "BeamSearch.h" #include "MonolingReader.h" #include "cublasWrapper.h" using namespace cytonLib; namespace cytonMt { bool MachTrans::checkTrainStatus(bool tune) { double timeCost; XLLib::endTime(lastCheckTime, &timeCost); double likeli=sumTrainLikeli/nTrgWords; XLLib::printf(" s%.1e %s %0.0fw/s, lr:%.2e tr:%.3e", (double)nSents, checkTime().c_str(), nSrcWordsRaw/timeCost, lambda, likeli); XLLib::printf(" bestV:%.6f %.6f", likeliValidBest0, likeliValidBest1); Precision likeliValid=test(validCorpora); if(tune) { lastProbe=nSents; Precision margin=::max((Precision)params.probeMargin*lambda, (Precision)0.001); bool succeed=likeliValid>=likeliValidBest0+margin; bool succeedRelax=likeliValid>=likeliValidBest1; XLLib::printf(" inc:%.6f/%.6f %s%s", likeliValid-likeliValidBest0, margin, succeed?"s":"f", succeedRelax?"s":"f" ); string tModel=params.saveModel+XLLib::stringFormat("/model_epoch%02d_s%d_%s%.6f", epoch, global.batch, succeedRelax?"s":"f", likeliValid); while(tModel==bestModel) { tModel+="a"; } saveModel(tModel); XLLib::printf( " save:%s", XLLib::fileName(tModel).c_str()); savedModels.push_back(tModel); if(succeed) { numFails=0; likeliValidBest0=::max(likeliValid, likeliValidBest0); } else { numFails+=1; } XLLib::printf(" nFail:%d", numFails); if(numFails>=params.patience || epoch+1>=params.decayStart) //prepare for restart { lambdaReduced=true; likeliValidBest0=::max(likeliValidBest0, likeliValidBest1); } if(succeedRelax) { string oldBest=bestModel; bestModel=tModel; XLLib::fileLink(XLLib::fileName(tModel), XLLib::stringFormat("%s/model", params.saveModel.c_str())); likeliValidBest1=::max(likeliValid, likeliValidBest1); } else { if(numFails>=params.patience && bestModel!=tModel) { XLLib::printf(" loadM:%s", XLLib::fileName(bestModel).c_str()); NetworkMt::loadModel(bestModel); test(validCorpora); } } if(lambdaReduced) { XLLib::printf(" lrDecay"); lambda *= params.decayRate; numFails=0; if(!params.decayConti) { lambdaReduced=false; } } while(savedModels.size()>params.maxSaveModels) { for(int i=0; i<savedModels.size();i++) { string tm=savedModels.at(i); if(tm!=bestModel) { XLLib::fileRemove(tm); savedModels.erase(savedModels.begin()+i); break; } } } lastCheckTime=XLLib::startTime(); sumTrainLikeli=0; nSrcWordsRaw=0; nTrgWords=0; } XLLib::printfln(""); return false; } void MachTrans::learn(CorpusReader& corpus, bool updateParams) { XLLibTime epochStart=XLLib::startTime(); corpus.reset(); CorpusReadNode* node; int nPrintDetails=1; HostMatInt srcMat; HostMatInt trgMat; HostMatInt trgMatSoftmax; while(true) { bool read=corpus.read(node); bool probe=false; bool tune=true; probe=nSents-lastProbe>=probePeriod; if(!probe && epoch==params.epochStart && nPrintDetails<=1 && nSents-lastProbe 
>=batchSize*50*nPrintDetails) { nPrintDetails+=1; probe=true; tune=false; } if(probe) { bool exit=checkTrainStatus(tune); fflush(stdout); } if(!read) { break; } global.batch+=1; batch.setSrcTrg(node->srcMat, node->trgMat, node->factor); double tLikeli=train(lambda , updateParams); sumTrainLikeli+=tLikeli; nSents+=batchSize*node->factor; nSrcWordsRaw+=batch.hSrcMat.length(); nTrgWords+=batch.numTrgWords()*node->factor; } } double MachTrans::test(vector<CorpusReader>& corpora) { bool gbTestMode=true; std::swap(cytonLib::testMode, gbTestMode); double res=0; XLLib::printf(" valid:"); for(int i=0;i<corpora.size();i++) { double score=test(corpora[i]); if(i==0) { res=score; } else { XLLib::printf(" "); } XLLib::printf("%.6f", score); } std::swap(cytonLib::testMode, gbTestMode); return res; } double MachTrans::test(CorpusReader& corpus) { double timeStartEpoch=clock(); corpus.reset(); double sumLikeli=0; int numSents=0; int numSrcWords=0; int numTrgWords=0; CorpusReadNode* node; bool gbTestMode=true; std::swap(cytonLib::testMode,gbTestMode); while(corpus.read(node)) { batch.setSrcTrg(node->srcMat, node->trgMat, node->factor); double tLikeli=getScore()*node->factor; sumLikeli+=tLikeli; numSents+=batchSize*node->factor; numSrcWords+=batch.srcMat.length()*node->factor; numTrgWords+=batch.numTrgWords()*node->factor; } double likeliTrain=sumLikeli/numTrgWords; std::swap(cytonLib::testMode,gbTestMode); return likeliTrain; } string MachTrans::checkTime() { string res=XLLib::stringFormat("%s %s", XLLib::endTime(startTime).c_str(), XLLib::endTime(lastCheckTime).c_str()); return res; } void MachTrans::work() { startTime=XLLib::startTime(); string mode=params.mode; if(mode=="train") { workTrain(); } else if(mode=="translate") { workTest(); } else { XLLib::printfln("Unknown mode %s.",mode.c_str()); } } void MachTrans::loadModel(string& modelFile) { int i=modelFile.rfind('/'); if(i<0) { XLLib::printfln("Error: modelFile wrong %s", modelFile.c_str()); XLLib::printfln("modelFile must be modelDir/modelFile, and modelDir has settings vocab.sn vocab.tn."); } assert(i>=0); string modelDir=modelFile.substr(0, i+1); srcVocab.load(modelDir+"vocab.sn", 0); trgVocab.load(modelDir+"vocab.tn", 0); assert(params.srcVocabSize==srcVocab.size()); assert(params.trgVocabSize==trgVocab.size()); NetworkMt::init(); NetworkMt::loadModel(modelFile); } void MachTrans::workTrain() { cytonLib::testMode=false; if(XLLib::dirExists(params.saveModel)) { XLLib::printfln("Warning: model dir exists : %s . 
", params.saveModel.c_str()); // exit(1); } XLLib::dirMake(params.saveModel); if(params.loadModel.empty()) { srcVocab.load(params.srcVocab, params.srcVocabSize); params.srcVocabSize=srcVocab.size(); trgVocab.load(params.trgVocab, params.trgVocabSize); params.trgVocabSize=trgVocab.size(); NetworkMt::init(); } else { loadModel(params.loadModel); } params.saveModelParams(params.saveModel+"settings"); srcVocab.save(params.saveModel+"vocab.sn"); trgVocab.save(params.saveModel+"vocab.tn"); XLLib::printfln("real vocabSize src %d, trg %d", srcVocab.size(), trgVocab.size()); CorpusReader trainCorpus; { vector<string> ts; XLLib::str2list(params.trainData,":", ts); // assert(ts.size()==2); for(int i=0; i<ts.size(); i+=3) { double factor=1.0; if(i+2<ts.size()) { factor=atof(ts.at(i+2).c_str()); } trainCorpus.init(ts.at(i), ts.at(i+1), srcVocab, trgVocab, params.ignoreUnk, batchSize, params.maxSeqLen, factor); } } { validCorpora.push_back(CorpusReader()); vector<string> ts; XLLib::str2list(params.devData,":", ts); validCorpora.back().init(ts.at(0),ts.at(1), srcVocab, trgVocab, params.ignoreUnk, batchSize, params.maxSeqLen); } Precision likeliValid=-10000; { XLLib::printf("initial"); likeliValid=test(validCorpora); XLLib::printfln(""); fflush(stdout); } bestModel=params.saveModel+XLLib::stringFormat("/model_epoch%d_s0%.6f",params.epochStart, likeliValid); saveModel(bestModel); XLLib::printfln("save:%s", XLLib::fileName(bestModel).c_str()); savedModels.push_back(bestModel); likeliValidBest0=likeliValid; likeliValidBest1=likeliValid; lastCheckTime=XLLib::startTime(); numFails=0; global.batch=0; nSents=0; nSrcWordsRaw=0; nTrgWords=0; lastProbe=0; lambda=params.learningRate; lambdaReduced=params.decayStatus; probePeriod=trainCorpus.nSents/params.probeFreq; XLLib::printfln("probePeriod %d sents (%d/%g)", probePeriod, trainCorpus.nSents, params.probeFreq); for(epoch=params.epochStart; epoch<=params.epochs; epoch++) { global.epoch=epoch; XLLib::printf("# e%d", epoch); learn(trainCorpus, true); } if(nSents-lastProbe>0) { checkTrainStatus(); } if(params.epochs>0){ XLLib::printfln("\nbestModel %s", bestModel.c_str()); NetworkMt::loadModel(bestModel); XLLib::printf("best "); double avgLikeliValid=test(validCorpora); XLLib::printf("\n %.16e", avgLikeliValid); } } void MachTrans::workTest() { cytonLib::batchSize=1; cytonLib::testMode=true; loadModel(params.loadModel); BeamSearch beamSearch; beamSearch.init(this, &trgVocab, params.beamSize, params.maxSeqLen, params.lenPenalty, params.embedSize, params.hiddenSize, params.numLayers); MonolingReader corpus; corpus.init(params.testInput, &srcVocab); corpus.reset(); ofstream fOutput; ostream *out; if(params.testOutput!="stdout") { fOutput.open(params.testOutput); out=&fOutput; } else { out=&std::cout; } int batchSize=1; int nIgnore=0; HostMatInt srcMat; prevApply(); double sumScore; int numSents=0; int numTrgWords=0; hipSetDevice(0); XLLib::printfln(global.os, "start translating.."); for(int i=0;;i++) { hipDeviceSynchronize(); string srcLine; int nRead=corpus.read_mini_batch(batchSize, params.ignoreUnk, srcMat, &srcLine); if(nRead==0) { break; } vector<string> trans; string detail; Precision score; int seqLen=srcMat.nj; if(seqLen>params.maxSeqLen) { nIgnore+=1; XLLib::printfln("%d-th (total %d) sentence is %d words, too long, so ignored.", i, nIgnore, seqLen); } else { if(srcMat.length()>0) { batch.setSrc(srcMat); detail=beamSearch.apply(trans, &score); } } sumScore+=score; numSents+=1; int tLen=trans.size()+1; numTrgWords+=tLen; XLLib::printfln(global.os, "%d %s => %e 
%s", i, srcLine.c_str(), score/tLen, detail.c_str()); (*out)<<XLLib::toString(trans)<<"\n"; } if(params.testOutput!="stdout") { fOutput.close(); } double avgScore=sumScore/numTrgWords; XLLib::printfln(global.os, "sentences %d, ignore %d, words %d, avgScore %e .", numSents, nIgnore, numTrgWords, avgScore); } }
bc8711d939920b1b1891431da2d99128cbcf8ab2.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "MachTrans.h" #include "ParamsMt.h" #include "Global.h" #include "utils.h" #include "WeightFactory.h" #include "HostMatrix.h" #include "CorpusReader.h" #include "BeamSearch.h" #include "MonolingReader.h" #include "cublasWrapper.h" using namespace cytonLib; namespace cytonMt { bool MachTrans::checkTrainStatus(bool tune) { double timeCost; XLLib::endTime(lastCheckTime, &timeCost); double likeli=sumTrainLikeli/nTrgWords; XLLib::printf(" s%.1e %s %0.0fw/s, lr:%.2e tr:%.3e", (double)nSents, checkTime().c_str(), nSrcWordsRaw/timeCost, lambda, likeli); XLLib::printf(" bestV:%.6f %.6f", likeliValidBest0, likeliValidBest1); Precision likeliValid=test(validCorpora); if(tune) { lastProbe=nSents; Precision margin=std::max((Precision)params.probeMargin*lambda, (Precision)0.001); bool succeed=likeliValid>=likeliValidBest0+margin; bool succeedRelax=likeliValid>=likeliValidBest1; XLLib::printf(" inc:%.6f/%.6f %s%s", likeliValid-likeliValidBest0, margin, succeed?"s":"f", succeedRelax?"s":"f" ); string tModel=params.saveModel+XLLib::stringFormat("/model_epoch%02d_s%d_%s%.6f", epoch, global.batch, succeedRelax?"s":"f", likeliValid); while(tModel==bestModel) { tModel+="a"; } saveModel(tModel); XLLib::printf( " save:%s", XLLib::fileName(tModel).c_str()); savedModels.push_back(tModel); if(succeed) { numFails=0; likeliValidBest0=std::max(likeliValid, likeliValidBest0); } else { numFails+=1; } XLLib::printf(" nFail:%d", numFails); if(numFails>=params.patience || epoch+1>=params.decayStart) //prepare for restart { lambdaReduced=true; likeliValidBest0=std::max(likeliValidBest0, likeliValidBest1); } if(succeedRelax) { string oldBest=bestModel; bestModel=tModel; XLLib::fileLink(XLLib::fileName(tModel), XLLib::stringFormat("%s/model", params.saveModel.c_str())); likeliValidBest1=std::max(likeliValid, likeliValidBest1); } else { if(numFails>=params.patience && bestModel!=tModel) { XLLib::printf(" loadM:%s", XLLib::fileName(bestModel).c_str()); NetworkMt::loadModel(bestModel); test(validCorpora); } } if(lambdaReduced) { XLLib::printf(" lrDecay"); lambda *= params.decayRate; numFails=0; if(!params.decayConti) { lambdaReduced=false; } } while(savedModels.size()>params.maxSaveModels) { for(int i=0; i<savedModels.size();i++) { string tm=savedModels.at(i); if(tm!=bestModel) { XLLib::fileRemove(tm); savedModels.erase(savedModels.begin()+i); break; } } } lastCheckTime=XLLib::startTime(); sumTrainLikeli=0; nSrcWordsRaw=0; nTrgWords=0; } XLLib::printfln(""); return false; } void MachTrans::learn(CorpusReader& corpus, bool updateParams) { XLLibTime epochStart=XLLib::startTime(); corpus.reset(); CorpusReadNode* node; int nPrintDetails=1; HostMatInt srcMat; HostMatInt trgMat; HostMatInt trgMatSoftmax; while(true) { bool read=corpus.read(node); bool probe=false; bool tune=true; probe=nSents-lastProbe>=probePeriod; if(!probe && epoch==params.epochStart && nPrintDetails<=1 && nSents-lastProbe >=batchSize*50*nPrintDetails) { nPrintDetails+=1; 
probe=true; tune=false; } if(probe) { bool exit=checkTrainStatus(tune); fflush(stdout); } if(!read) { break; } global.batch+=1; batch.setSrcTrg(node->srcMat, node->trgMat, node->factor); double tLikeli=train(lambda , updateParams); sumTrainLikeli+=tLikeli; nSents+=batchSize*node->factor; nSrcWordsRaw+=batch.hSrcMat.length(); nTrgWords+=batch.numTrgWords()*node->factor; } } double MachTrans::test(vector<CorpusReader>& corpora) { bool gbTestMode=true; std::swap(cytonLib::testMode, gbTestMode); double res=0; XLLib::printf(" valid:"); for(int i=0;i<corpora.size();i++) { double score=test(corpora[i]); if(i==0) { res=score; } else { XLLib::printf(" "); } XLLib::printf("%.6f", score); } std::swap(cytonLib::testMode, gbTestMode); return res; } double MachTrans::test(CorpusReader& corpus) { double timeStartEpoch=clock(); corpus.reset(); double sumLikeli=0; int numSents=0; int numSrcWords=0; int numTrgWords=0; CorpusReadNode* node; bool gbTestMode=true; std::swap(cytonLib::testMode,gbTestMode); while(corpus.read(node)) { batch.setSrcTrg(node->srcMat, node->trgMat, node->factor); double tLikeli=getScore()*node->factor; sumLikeli+=tLikeli; numSents+=batchSize*node->factor; numSrcWords+=batch.srcMat.length()*node->factor; numTrgWords+=batch.numTrgWords()*node->factor; } double likeliTrain=sumLikeli/numTrgWords; std::swap(cytonLib::testMode,gbTestMode); return likeliTrain; } string MachTrans::checkTime() { string res=XLLib::stringFormat("%s %s", XLLib::endTime(startTime).c_str(), XLLib::endTime(lastCheckTime).c_str()); return res; } void MachTrans::work() { startTime=XLLib::startTime(); string mode=params.mode; if(mode=="train") { workTrain(); } else if(mode=="translate") { workTest(); } else { XLLib::printfln("Unknown mode %s.",mode.c_str()); } } void MachTrans::loadModel(string& modelFile) { int i=modelFile.rfind('/'); if(i<0) { XLLib::printfln("Error: modelFile wrong %s", modelFile.c_str()); XLLib::printfln("modelFile must be modelDir/modelFile, and modelDir has settings vocab.sn vocab.tn."); } assert(i>=0); string modelDir=modelFile.substr(0, i+1); srcVocab.load(modelDir+"vocab.sn", 0); trgVocab.load(modelDir+"vocab.tn", 0); assert(params.srcVocabSize==srcVocab.size()); assert(params.trgVocabSize==trgVocab.size()); NetworkMt::init(); NetworkMt::loadModel(modelFile); } void MachTrans::workTrain() { cytonLib::testMode=false; if(XLLib::dirExists(params.saveModel)) { XLLib::printfln("Warning: model dir exists : %s . 
", params.saveModel.c_str()); // exit(1); } XLLib::dirMake(params.saveModel); if(params.loadModel.empty()) { srcVocab.load(params.srcVocab, params.srcVocabSize); params.srcVocabSize=srcVocab.size(); trgVocab.load(params.trgVocab, params.trgVocabSize); params.trgVocabSize=trgVocab.size(); NetworkMt::init(); } else { loadModel(params.loadModel); } params.saveModelParams(params.saveModel+"settings"); srcVocab.save(params.saveModel+"vocab.sn"); trgVocab.save(params.saveModel+"vocab.tn"); XLLib::printfln("real vocabSize src %d, trg %d", srcVocab.size(), trgVocab.size()); CorpusReader trainCorpus; { vector<string> ts; XLLib::str2list(params.trainData,":", ts); // assert(ts.size()==2); for(int i=0; i<ts.size(); i+=3) { double factor=1.0; if(i+2<ts.size()) { factor=atof(ts.at(i+2).c_str()); } trainCorpus.init(ts.at(i), ts.at(i+1), srcVocab, trgVocab, params.ignoreUnk, batchSize, params.maxSeqLen, factor); } } { validCorpora.push_back(CorpusReader()); vector<string> ts; XLLib::str2list(params.devData,":", ts); validCorpora.back().init(ts.at(0),ts.at(1), srcVocab, trgVocab, params.ignoreUnk, batchSize, params.maxSeqLen); } Precision likeliValid=-10000; { XLLib::printf("initial"); likeliValid=test(validCorpora); XLLib::printfln(""); fflush(stdout); } bestModel=params.saveModel+XLLib::stringFormat("/model_epoch%d_s0%.6f",params.epochStart, likeliValid); saveModel(bestModel); XLLib::printfln("save:%s", XLLib::fileName(bestModel).c_str()); savedModels.push_back(bestModel); likeliValidBest0=likeliValid; likeliValidBest1=likeliValid; lastCheckTime=XLLib::startTime(); numFails=0; global.batch=0; nSents=0; nSrcWordsRaw=0; nTrgWords=0; lastProbe=0; lambda=params.learningRate; lambdaReduced=params.decayStatus; probePeriod=trainCorpus.nSents/params.probeFreq; XLLib::printfln("probePeriod %d sents (%d/%g)", probePeriod, trainCorpus.nSents, params.probeFreq); for(epoch=params.epochStart; epoch<=params.epochs; epoch++) { global.epoch=epoch; XLLib::printf("# e%d", epoch); learn(trainCorpus, true); } if(nSents-lastProbe>0) { checkTrainStatus(); } if(params.epochs>0){ XLLib::printfln("\nbestModel %s", bestModel.c_str()); NetworkMt::loadModel(bestModel); XLLib::printf("best "); double avgLikeliValid=test(validCorpora); XLLib::printf("\n %.16e", avgLikeliValid); } } void MachTrans::workTest() { cytonLib::batchSize=1; cytonLib::testMode=true; loadModel(params.loadModel); BeamSearch beamSearch; beamSearch.init(this, &trgVocab, params.beamSize, params.maxSeqLen, params.lenPenalty, params.embedSize, params.hiddenSize, params.numLayers); MonolingReader corpus; corpus.init(params.testInput, &srcVocab); corpus.reset(); ofstream fOutput; ostream *out; if(params.testOutput!="stdout") { fOutput.open(params.testOutput); out=&fOutput; } else { out=&std::cout; } int batchSize=1; int nIgnore=0; HostMatInt srcMat; prevApply(); double sumScore; int numSents=0; int numTrgWords=0; cudaSetDevice(0); XLLib::printfln(global.os, "start translating.."); for(int i=0;;i++) { cudaDeviceSynchronize(); string srcLine; int nRead=corpus.read_mini_batch(batchSize, params.ignoreUnk, srcMat, &srcLine); if(nRead==0) { break; } vector<string> trans; string detail; Precision score; int seqLen=srcMat.nj; if(seqLen>params.maxSeqLen) { nIgnore+=1; XLLib::printfln("%d-th (total %d) sentence is %d words, too long, so ignored.", i, nIgnore, seqLen); } else { if(srcMat.length()>0) { batch.setSrc(srcMat); detail=beamSearch.apply(trans, &score); } } sumScore+=score; numSents+=1; int tLen=trans.size()+1; numTrgWords+=tLen; XLLib::printfln(global.os, "%d %s => %e 
%s", i, srcLine.c_str(), score/tLen, detail.c_str()); (*out)<<XLLib::toString(trans)<<"\n"; } if(params.testOutput!="stdout") { fOutput.close(); } double avgScore=sumScore/numTrgWords; XLLib::printfln(global.os, "sentences %d, ignore %d, words %d, avgScore %e .", numSents, nIgnore, numTrgWords, avgScore); } }
44b4eb8e1df4ad6d7c15de92f9df1619c6ffb565.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> void gpu_edges (std::string filename) { BMP Background; Background.ReadFromFile(filename.c_str()); int height = Background.TellHeight(); int width = Background.TellWidth(); int depth = Background.TellBitDepth(); BMP Output(Background); hipError_t cuda_ret; int *weights_d; int weights[18]; //SET WEIGHTS: for(int i = 9; i < 18; i++){ weights[i] = 1; } weights[0] = -1; weights[1] = -1; weights[2] = -1; weights[3] = -1; weights[4] = 8; weights[5] = -1; weights[6] = -1; weights[7] = -1; weights[8] = -1; //WEIGHTS SET //Timing start hipEvent_t begin, end; float time; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord(begin, 0); ebmpBYTE *A_h, *B_h; ebmpBYTE *A_d, *B_d; A_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); B_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ A_h[(i*width+j)*3] = Background.Pixels[i][j].Blue; A_h[(i*width+j)*3+1] = Background.Pixels[i][j].Green; A_h[(i*width+j)*3+2] = Background.Pixels[i][j].Red; } } dim3 dim_grid, dim_block; hipMalloc((void**)&weights_d, sizeof(int)*18 ); hipMalloc((void**)&A_d, sizeof(ebmpBYTE)*width*height*3); hipMalloc((void**)&B_d, sizeof(ebmpBYTE)*width*height*3); hipDeviceSynchronize(); hipMemcpy(weights_d, &weights[0], sizeof(int)*18, hipMemcpyHostToDevice); hipMemcpy(A_d, A_h, sizeof(ebmpBYTE)*width*height*3, hipMemcpyHostToDevice); hipDeviceSynchronize(); dim3 DimGrid(1, 1, 1); dim3 DimBlock(1024, 1, 1); hipLaunchKernelGGL(( gpu_filter), dim3(DimGrid), dim3(DimBlock), 0, 0, A_d, B_d, weights_d, width, height); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) printf("error"); hipMemcpy(B_h, B_d, sizeof(ebmpBYTE)*width*height*3, hipMemcpyDeviceToHost); hipDeviceSynchronize(); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ Output.Pixels[i][j].Blue = B_h[(i*width+j)*3]; Output.Pixels[i][j].Green = B_h[(i*width+j)*3+1]; Output.Pixels[i][j].Red = B_h[(i*width+j)*3+2]; } } //Timing end hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&time, begin, end); printf("GPU Edges time: %f ms \n\n", time ); std::string fileout = filename; fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); string extra = "_gpu_edges.bmp"; fileout = fileout + extra; Output.WriteToFile(fileout.c_str()); free(A_h); free(B_h); hipFree(weights_d); hipFree(B_d); hipFree(A_d); return; }
44b4eb8e1df4ad6d7c15de92f9df1619c6ffb565.cu
#include <string> void gpu_edges (std::string filename) { BMP Background; Background.ReadFromFile(filename.c_str()); int height = Background.TellHeight(); int width = Background.TellWidth(); int depth = Background.TellBitDepth(); BMP Output(Background); cudaError_t cuda_ret; int *weights_d; int weights[18]; //SET WEIGHTS: for(int i = 9; i < 18; i++){ weights[i] = 1; } weights[0] = -1; weights[1] = -1; weights[2] = -1; weights[3] = -1; weights[4] = 8; weights[5] = -1; weights[6] = -1; weights[7] = -1; weights[8] = -1; //WEIGHTS SET //Timing start cudaEvent_t begin, end; float time; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord(begin, 0); ebmpBYTE *A_h, *B_h; ebmpBYTE *A_d, *B_d; A_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); B_h = (ebmpBYTE*) malloc( sizeof(ebmpBYTE)*width*height*3 ); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ A_h[(i*width+j)*3] = Background.Pixels[i][j].Blue; A_h[(i*width+j)*3+1] = Background.Pixels[i][j].Green; A_h[(i*width+j)*3+2] = Background.Pixels[i][j].Red; } } dim3 dim_grid, dim_block; cudaMalloc((void**)&weights_d, sizeof(int)*18 ); cudaMalloc((void**)&A_d, sizeof(ebmpBYTE)*width*height*3); cudaMalloc((void**)&B_d, sizeof(ebmpBYTE)*width*height*3); cudaDeviceSynchronize(); cudaMemcpy(weights_d, &weights[0], sizeof(int)*18, cudaMemcpyHostToDevice); cudaMemcpy(A_d, A_h, sizeof(ebmpBYTE)*width*height*3, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); dim3 DimGrid(1, 1, 1); dim3 DimBlock(1024, 1, 1); gpu_filter<<<DimGrid, DimBlock>>>(A_d, B_d, weights_d, width, height); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) printf("error"); cudaMemcpy(B_h, B_d, sizeof(ebmpBYTE)*width*height*3, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int i = 0; i < height; i++){ for (int j = 0; j < width; j++){ Output.Pixels[i][j].Blue = B_h[(i*width+j)*3]; Output.Pixels[i][j].Green = B_h[(i*width+j)*3+1]; Output.Pixels[i][j].Red = B_h[(i*width+j)*3+2]; } } //Timing end cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&time, begin, end); printf("GPU Edges time: %f ms \n\n", time ); std::string fileout = filename; fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); fileout.pop_back(); string extra = "_gpu_edges.bmp"; fileout = fileout + extra; Output.WriteToFile(fileout.c_str()); free(A_h); free(B_h); cudaFree(weights_d); cudaFree(B_d); cudaFree(A_d); return; }
8738e8d86e8f2b2c9eb7cc5886cd8d4903b5e06e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cast_op.h" namespace caffe2 { namespace { template <typename DstType, typename SrcType> __global__ void CastKernel(const int N, const SrcType* X, DstType* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = static_cast<DstType>(X[i]); } } } // namespace template <> template <typename DstType, typename SrcType> bool CastOp<CUDAContext>::DoRunWithType() { auto& input = Input(0); auto* output = Output(0); output->ResizeLike(input); const auto* data = input.template data<SrcType>(); auto* out = output->template mutable_data<DstType>(); DCHECK(input.size() < INT_MAX); int N = input.size(); hipLaunchKernelGGL(( CastKernel<DstType, SrcType>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, data, out); return true; } REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>); } // namespace caffe2
8738e8d86e8f2b2c9eb7cc5886cd8d4903b5e06e.cu
#include "caffe2/core/context_gpu.h" #include "caffe2/operators/cast_op.h" namespace caffe2 { namespace { template <typename DstType, typename SrcType> __global__ void CastKernel(const int N, const SrcType* X, DstType* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = static_cast<DstType>(X[i]); } } } // namespace template <> template <typename DstType, typename SrcType> bool CastOp<CUDAContext>::DoRunWithType() { auto& input = Input(0); auto* output = Output(0); output->ResizeLike(input); const auto* data = input.template data<SrcType>(); auto* out = output->template mutable_data<DstType>(); DCHECK(input.size() < INT_MAX); int N = input.size(); CastKernel<DstType, SrcType><<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, data, out); return true; } REGISTER_CUDA_OPERATOR(Cast, CastOp<CUDAContext>); } // namespace caffe2
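Several records above (the focal-loss and cast operators in particular) rely on the grid-stride loop hidden behind the CUDA_KERNEL_LOOP / CUDA_1D_KERNEL_LOOP macros. Below is a distilled, self-contained CUDA version of such a cast kernel written with the loop spelled out; the launch configuration and names are illustrative, not Caffe2's.

// Distilled sketch of the grid-stride loop pattern: each thread strides over the
// input so any grid size covers all N elements.
#include <cuda_runtime.h>
#include <cstdio>

template <typename Dst, typename Src>
__global__ void castKernel(int n, const Src* in, Dst* out) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = static_cast<Dst>(in[i]);
    }
}

int main() {
    const int n = 1 << 16;                        // illustrative element count
    float* d_in;
    double* d_out;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(double));
    cudaMemset(d_in, 0, n * sizeof(float));
    castKernel<double, float><<<128, 256>>>(n, d_in, d_out);  // 128 blocks is arbitrary
    cudaDeviceSynchronize();
    printf("last CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}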
7b008cb5d6d6cfc9e39b5ec2b3b3a25669805933.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/explainer/kernel_shap.hpp> #include <test_utils.h> #include <raft/core/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <raft/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <test_utils.h> namespace MLCommon { } #include <gtest/gtest.h> namespace ML { namespace Explainer { struct MakeKSHAPDatasetInputs { int nrows_exact; int nrows_sampled; int ncols; int nrows_background; int max_samples; uint64_t seed; }; template <typename T> class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> { protected: void SetUp() override { params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam(); stream = handle.get_stream(); int i, j; nrows_X = params.nrows_exact + params.nrows_sampled; rmm::device_uvector<T> background(params.nrows_background * params.ncols, stream); rmm::device_uvector<T> observation(params.ncols, stream); rmm::device_uvector<int> nsamples(params.nrows_sampled / 2, stream); rmm::device_uvector<float> X(nrows_X * params.ncols, stream); rmm::device_uvector<T> dataset(nrows_X * params.nrows_background * params.ncols, stream); thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background.data()); thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation.data()); thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples.data()); thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X.data()); thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset.data()); // Initialize arrays: // Aassign a sentinel value to the observation to check easily later T sent_value = nrows_X * params.nrows_background * params.ncols * 100; for (i = 0; i < params.ncols; i++) { o_ptr[i] = sent_value; } // Initialize background array with different odd value per row, makes // it easier to debug if something goes wrong. for (i = 0; i < params.nrows_background; i++) { for (j = 0; j < params.ncols; j++) { b_ptr[i * params.ncols + j] = (i * 2) + 1; } } // Initialize the exact part of X. 
We create 2 `1` values per row for the test thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0); for (i = 0; i < params.nrows_exact; i++) { for (j = i; j < i + 2; j++) { X_ptr[i * params.ncols + j] = (float)1.0; } } // Initialize the number of samples per row, we initialize each even row to // max samples and each odd row to max_samples - 1 for (i = 0; i < params.nrows_sampled / 2; i++) { n_ptr[i] = params.max_samples - i % 2; } kernel_dataset(handle, X.data(), nrows_X, params.ncols, background.data(), params.nrows_background, dataset.data(), observation.data(), nsamples.data(), params.nrows_sampled, params.max_samples, params.seed); handle.sync_stream(stream); int counter; // Check the generated part of X by sampling. The first nrows_exact // correspond to the exact part generated before, so we just test after that. test_sampled_X = true; j = 0; for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2; i += 2 * params.ncols) { // check that number of samples is the number indicated by nsamples. counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == n_ptr[j])); // check that number of samples of the next line is the compliment, // i.e. ncols - nsamples[j] counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j]))); j++; } // Check for the exact part of the generated dataset. test_scatter_exact = true; for (i = 0; i < params.nrows_exact; i++) { for (j = i * params.nrows_background * params.ncols; j < (i + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); // Check that indeed we have two observation entries ber row test_scatter_exact = test_scatter_exact && (counter == 2); if (not test_scatter_exact) { std::cout << "test_scatter_exact counter failed with: " << counter << ", expected value was 2." << std::endl; break; } } if (not test_scatter_exact) { break; } } // Check for the sampled part of the generated dataset test_scatter_sampled = true; // compliment_ctr is a helper counter to help check nrows_dataset per entry in // nsamples without complicating indexing since sampled part starts at nrows_sampled int compliment_ctr = 0; for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) { // First set of dataset observations must correspond to nsamples[i] for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]); } // The next set of samples must correspond to the compliment: ncols - nsamples[i] compliment_ctr++; for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { // Check that number of observation entries corresponds to nsamples. 
counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]); } } } protected: MakeKSHAPDatasetInputs params; int nrows_X; bool test_sampled_X; bool test_scatter_exact; bool test_scatter_sampled; raft::handle_t handle; hipStream_t stream = 0; }; const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL} }; typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF; TEST_P(MakeKSHAPDatasetTestF, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) // will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf)); const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL}}; typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD; TEST_P(MakeKSHAPDatasetTestD, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) // will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd)); } // end namespace Explainer } // end namespace ML
7b008cb5d6d6cfc9e39b5ec2b3b3a25669805933.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/explainer/kernel_shap.hpp> #include <test_utils.h> #include <raft/core/cudart_utils.hpp> #include <raft/core/handle.hpp> #include <raft/cuda_utils.cuh> #include <rmm/device_uvector.hpp> #include <thrust/count.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <test_utils.h> namespace MLCommon { } #include <gtest/gtest.h> namespace ML { namespace Explainer { struct MakeKSHAPDatasetInputs { int nrows_exact; int nrows_sampled; int ncols; int nrows_background; int max_samples; uint64_t seed; }; template <typename T> class MakeKSHAPDatasetTest : public ::testing::TestWithParam<MakeKSHAPDatasetInputs> { protected: void SetUp() override { params = ::testing::TestWithParam<MakeKSHAPDatasetInputs>::GetParam(); stream = handle.get_stream(); int i, j; nrows_X = params.nrows_exact + params.nrows_sampled; rmm::device_uvector<T> background(params.nrows_background * params.ncols, stream); rmm::device_uvector<T> observation(params.ncols, stream); rmm::device_uvector<int> nsamples(params.nrows_sampled / 2, stream); rmm::device_uvector<float> X(nrows_X * params.ncols, stream); rmm::device_uvector<T> dataset(nrows_X * params.nrows_background * params.ncols, stream); thrust::device_ptr<T> b_ptr = thrust::device_pointer_cast(background.data()); thrust::device_ptr<T> o_ptr = thrust::device_pointer_cast(observation.data()); thrust::device_ptr<int> n_ptr = thrust::device_pointer_cast(nsamples.data()); thrust::device_ptr<float> X_ptr = thrust::device_pointer_cast(X.data()); thrust::device_ptr<T> d_ptr = thrust::device_pointer_cast(dataset.data()); // Initialize arrays: // Aassign a sentinel value to the observation to check easily later T sent_value = nrows_X * params.nrows_background * params.ncols * 100; for (i = 0; i < params.ncols; i++) { o_ptr[i] = sent_value; } // Initialize background array with different odd value per row, makes // it easier to debug if something goes wrong. for (i = 0; i < params.nrows_background; i++) { for (j = 0; j < params.ncols; j++) { b_ptr[i * params.ncols + j] = (i * 2) + 1; } } // Initialize the exact part of X. We create 2 `1` values per row for the test thrust::fill(thrust::device, X_ptr, &X_ptr[nrows_X * params.ncols - 1], 0); for (i = 0; i < params.nrows_exact; i++) { for (j = i; j < i + 2; j++) { X_ptr[i * params.ncols + j] = (float)1.0; } } // Initialize the number of samples per row, we initialize each even row to // max samples and each odd row to max_samples - 1 for (i = 0; i < params.nrows_sampled / 2; i++) { n_ptr[i] = params.max_samples - i % 2; } kernel_dataset(handle, X.data(), nrows_X, params.ncols, background.data(), params.nrows_background, dataset.data(), observation.data(), nsamples.data(), params.nrows_sampled, params.max_samples, params.seed); handle.sync_stream(stream); int counter; // Check the generated part of X by sampling. 
The first nrows_exact // correspond to the exact part generated before, so we just test after that. test_sampled_X = true; j = 0; for (i = params.nrows_exact * params.ncols; i < nrows_X * params.ncols / 2; i += 2 * params.ncols) { // check that number of samples is the number indicated by nsamples. counter = thrust::count(&X_ptr[i], &X_ptr[i + params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == n_ptr[j])); // check that number of samples of the next line is the compliment, // i.e. ncols - nsamples[j] counter = thrust::count(&X_ptr[i + params.ncols], &X_ptr[i + 2 * params.ncols], 1); test_sampled_X = (test_sampled_X && (counter == (params.ncols - n_ptr[j]))); j++; } // Check for the exact part of the generated dataset. test_scatter_exact = true; for (i = 0; i < params.nrows_exact; i++) { for (j = i * params.nrows_background * params.ncols; j < (i + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); // Check that indeed we have two observation entries ber row test_scatter_exact = test_scatter_exact && (counter == 2); if (not test_scatter_exact) { std::cout << "test_scatter_exact counter failed with: " << counter << ", expected value was 2." << std::endl; break; } } if (not test_scatter_exact) { break; } } // Check for the sampled part of the generated dataset test_scatter_sampled = true; // compliment_ctr is a helper counter to help check nrows_dataset per entry in // nsamples without complicating indexing since sampled part starts at nrows_sampled int compliment_ctr = 0; for (i = params.nrows_exact; i < params.nrows_exact + params.nrows_sampled / 2; i++) { // First set of dataset observations must correspond to nsamples[i] for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == n_ptr[i - params.nrows_exact]); } // The next set of samples must correspond to the compliment: ncols - nsamples[i] compliment_ctr++; for (j = (i + compliment_ctr) * params.nrows_background * params.ncols; j < (i + compliment_ctr + 1) * params.nrows_background * params.ncols; j += params.ncols) { // Check that number of observation entries corresponds to nsamples. counter = thrust::count(&d_ptr[j], &d_ptr[j + params.ncols], sent_value); test_scatter_sampled = test_scatter_sampled && (counter == params.ncols - n_ptr[i - params.nrows_exact]); } } } protected: MakeKSHAPDatasetInputs params; int nrows_X; bool test_sampled_X; bool test_scatter_exact; bool test_scatter_sampled; raft::handle_t handle; cudaStream_t stream = 0; }; const std::vector<MakeKSHAPDatasetInputs> inputsf = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL} }; typedef MakeKSHAPDatasetTest<float> MakeKSHAPDatasetTestF; TEST_P(MakeKSHAPDatasetTestF, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) 
// will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestF, ::testing::ValuesIn(inputsf)); const std::vector<MakeKSHAPDatasetInputs> inputsd = {{10, 10, 12, 2, 3, 1234ULL}, {10, 0, 12, 2, 3, 1234ULL}, {100, 50, 200, 10, 10, 1234ULL}, {100, 0, 200, 10, 10, 1234ULL}, {0, 10, 12, 2, 3, 1234ULL}, {0, 50, 200, 10, 10, 1234ULL}}; typedef MakeKSHAPDatasetTest<double> MakeKSHAPDatasetTestD; TEST_P(MakeKSHAPDatasetTestD, Result) { ASSERT_TRUE(test_sampled_X); // todo (dgd): re-enable assertions // disabled due to a sporadic cuda 10.1 fail (by one value in one case!) // will be re-enabled soon after 0.17 release // ASSERT_TRUE(test_scatter_exact); // ASSERT_TRUE(test_scatter_sampled); } INSTANTIATE_TEST_CASE_P(MakeKSHAPDatasetTests, MakeKSHAPDatasetTestD, ::testing::ValuesIn(inputsd)); } // end namespace Explainer } // end namespace ML
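A note on the checks in the two MakeKSHAPDataset test files above: rows of X beyond the exact part come in pairs, where the first row of a pair carries nsamples[k] ones and the second carries the complement, ncols - nsamples[k] ones, and the test verifies this with thrust::count. The following is a minimal host-side sketch of the same counting invariant using std::count on plain std::vector data; it is an illustration only and not part of the test, and all names in it are hypothetical.

// Host-side sketch: mirrors the thrust::count checks above on std::vector data.
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  const int ncols    = 12;
  const int nsamples = 3;  // e.g. max_samples for an "even" sampled row
  std::vector<float> row(ncols, 0.f), comp(ncols, 0.f);
  for (int j = 0; j < nsamples; ++j) row[j] = 1.f;       // sampled row: nsamples ones
  for (int j = nsamples; j < ncols; ++j) comp[j] = 1.f;  // complement row: ncols - nsamples ones
  // Same invariant the test asserts on the GPU arrays.
  assert(std::count(row.begin(), row.end(), 1.f) == nsamples);
  assert(std::count(comp.begin(), comp.end(), 1.f) == ncols - nsamples);
  return 0;
}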
3ea82c076787455fa55ce58b7eb4ea5ca7d0a7b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/compact_bilinear_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #define CHECK_CUFFT(X) CHECK_EQ((X), HIPFFT_SUCCESS) // overloaded functions, to support float and double hipblasStatus_t cublasgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const float *alpha, const float *A, int lda, const float *beta, const float *B, int ldb, float *C, int ldc) { return hipblasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } hipblasStatus_t cublasgeam(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, const double *alpha, const double *A, int lda, const double *beta, const double *B, int ldb, double *C, int ldc) { return hipblasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } // caffe wrapper of transpose function // dst=src^T, with the src size being M*N template<typename Dtype> void caffe_gpu_transpose(int M, const int N, const Dtype* src, Dtype* dst) { CHECK(src != dst) << "support out of place transpose only"; Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgeam(Caffe::cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, M, N, &alpha, src, N, &beta, dst, M, dst, M), HIPBLAS_STATUS_SUCCESS); } template<typename Dtype> void transpose_batch(const int batchlen, const int M, const int N, const Dtype* src, Dtype* dst) { const int step = M * N; for (int ins = 0; ins < batchlen; ++ins) caffe_gpu_transpose(M, N, src + ins * step, dst + ins * step); } // wrappers to deal with atomic add of double __device__ void caffe_atomic_add(float* dst, float val) { atomicAdd(dst, val); } __device__ void caffe_atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT_NEXT_LINE(runtime/int) unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } // do the getCount and do transpose along the way // should clear top to 0 before call template<typename Dtype> __global__ void GPUCountAndTranspose(const int nthreads, const int * hh, const Dtype * ss, const Dtype* bottom, Dtype* top, const int hw, const int C, const int num_output_) { // input batchlen*C*hw // output batchlen*hw*num_output, the transpose of the original output CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of things you need to do // index is the current INPUT point to be computed int left = index % (C * hw); const int ibatch = index / (C * hw); const int ic = left / hw; const int ihw = left % hw; // get the target location const int target = ibatch * (hw * num_output_) + ihw * num_output_ + hh[ic]; // atomic add only supports float not double caffe_atomic_add(top + target, ss[ic] * bottom[index]); } } // some wrappers around cufftExec // float forward hipfftResult cufftExec(hipfftHandle plan, const float *idata, CaffeComplex<float> *odata) { return hipfftExecR2C(plan, reinterpret_cast<hipfftReal*>(const_cast<float*>(idata)), reinterpret_cast<hipfftComplex*>(odata)); } // double 
forward hipfftResult cufftExec(hipfftHandle plan, const double *idata, CaffeComplex<double> *odata) { return hipfftExecD2Z(plan, reinterpret_cast<hipfftDoubleReal*>(const_cast<double*>(idata)), reinterpret_cast<hipfftDoubleComplex*>(odata)); } // float inverse hipfftResult cufftExec(hipfftHandle plan, const CaffeComplex<float> *idata, float *odata) { return hipfftExecC2R(plan, reinterpret_cast<hipfftComplex*>( const_cast<CaffeComplex<float>*>(idata)), reinterpret_cast<hipfftReal*>(odata)); } // double inverse hipfftResult cufftExec(hipfftHandle plan, const CaffeComplex<double> *idata, double *odata) { return hipfftExecZ2D(plan, reinterpret_cast<hipfftDoubleComplex*>( const_cast<CaffeComplex<double>*>(idata)), reinterpret_cast<hipfftDoubleReal*>(odata)); } // call cufft to do batch*nffts // hipfftReal* src; hipfftComplex *output template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_fft(const int batchlen, const int hw, const int nfft, const Dtype* src, CaffeComplex<Dtype>* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_noinv_batch, src, output)); } else { const int step_in = hw * nfft; const int step_out = hw * (floor(1.0 * nfft / 2) + 1); for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_noinv_1, src + step_in * i, output + step_out * i)); } } } template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_ifft(const int batchlen, const int hw, const int nfft, const CaffeComplex<Dtype>* src, Dtype* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_inv_batch, src, output)); } else { const int step_in = hw * (floor(1.0 * nfft / 2) + 1); const int step_out = hw * nfft; for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_inv_1, src + step_in * i, output + step_out * i)); } } } // Complex multiplication template<typename Dtype> static __device__ __host__ inline CaffeComplex<Dtype> ComplexMul( const CaffeComplex<Dtype> &a, const CaffeComplex<Dtype> &b) { CaffeComplex<Dtype> c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // entrywise multiplication: y[i]=a[i]*b[i] template<typename Dtype> __global__ void complexMul(const int nthreads, const CaffeComplex<Dtype>* a, const CaffeComplex<Dtype>* b, CaffeComplex<Dtype>* y) { CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of entries y[index] = ComplexMul(a[index], b[index]); } } // dispatchers hipblasStatus_t cublasgemv(hipblasHandle_t handle, hipblasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) { return hipblasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } hipblasStatus_t cublasgemv(hipblasHandle_t handle, hipblasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return hipblasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } // sum the columns of a M*N source matrix and store it to dst template<typename Dtype> void caffe_sum_cols(const int M, const int N, const Dtype* src, Dtype* dst, Dtype* ones_hw) { Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgemv(Caffe::cublas_handle(), HIPBLAS_OP_T, N, M, &alpha, src, N, ones_hw, 1, &beta, dst, 1), HIPBLAS_STATUS_SUCCESS); } template<> void CompactBilinearLayer<float>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(hipfftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, 
batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_R2C, hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_C2R, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_C2R, hw)); } template<> void CompactBilinearLayer<double>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(hipfftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_D2Z, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_D2Z, hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_Z2D, batchsz*hw)); CHECK_CUFFT(hipfftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, HIPFFT_Z2D, hw)); } template<typename Dtype> void CompactBilinearLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int hw = bottom[0]->count(2); if (!plan_init) { // some init commands that will only be executed once plan_init = true; Initializations(hw); // get an all one vector CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&ones_hw), sizeof(Dtype) * hw)); caffe_gpu_set(hw, Dtype(1.0), ones_hw); } // memory pointer short hand Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; const int step_top = top[0]->count(1); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; // temporary space allocation Dtype* batchSpace[2]; CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&batchSpace[ipoly]), batchsz * num_output_ * hw * sizeof(Dtype))); CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype>))); } // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); for (int ipoly = 0; ipoly < 2; ++ipoly) { // some short hands Dtype* space = batchSpace[ipoly]; const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), space); // first get count and transpose nthreads = batchlen * step_bottom[ipoly]; GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], space, hw, C[ipoly], num_output_); // now space is batchlen*hw*num_output // then do FFT caffe_gpu_fft(batchlen, hw, num_output_, space, fftSpace[ipoly]); } // entry-wise multiplication int nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, fftSpace[0], fftSpace[1], fftSpace[0]); // ifft caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[0], batchSpace[0]); // transpose back Dtype* out_target; if (sum_pool_) out_target = batchSpace[1]; else out_target = top_data + batchStart * step_top; transpose_batch(batchlen, hw, num_output_, batchSpace[0], out_target); if (sum_pool_) caffe_sum_cols(batchlen * num_output_, hw, out_target, top_data 
+ batchStart * step_top, ones_hw); } // temporary space destroy for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(hipFree(batchSpace[ipoly])); CUDA_CHECK(hipFree(fftSpace[ipoly])); } } template<typename Dtype> __global__ void copy_and_transpose(const int nthreads, const int batch, const int num_output_, const int hw, const Dtype* src, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batch*num_output_ // dst size: batch*hw*num_output_ // index over dst const int left = index % (hw * num_output_); const int ibatch = index / (hw * num_output_); const int ihw = left / num_output_; const int iout = left % num_output_; dst[index] = src[ibatch * num_output_ + iout]; } } // C, dst, hh and ss are complement template<typename Dtype> __global__ void assign_back(const int nthreads, const Dtype* src, Dtype* dst, const int* hh, const Dtype* ss, const int batchlen, const int C, const int hw, const int num_output_) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batchlen*hw*num_output // dst size: batchlen*C*hw // index over dst const int left = index % (hw * C); const int ibatch = index / (hw * C); const int ic = left / hw; const int ihw = left % hw; dst[index] += ss[ic] * src[(ibatch * hw + ihw) * num_output_ + hh[ic]]; } } template<typename Dtype> __device__ void caffe_gpu_swap(Dtype* a, Dtype* b) { if (a == b) return; Dtype t = *a; *a = *b; *b = t; } template<typename Dtype> __global__ void fliplr(const int nthreads, Dtype* src, const int M, const int N) { CUDA_KERNEL_LOOP(index, nthreads) { // src & dst are M*N // flip left right, loop over src const int m = index / N; const int n = index % N; if ((n <= (N / 2)) && (n >= 1)) caffe_gpu_swap(src + index, src + index - n + N - n); } } template<typename Dtype> void CompactBilinearLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if ((!propagate_down[0]) && (!propagate_down[1])) return; // process the same bottom case // when the two bottoms are the same, one propagate down requires the other vector<bool> pd = propagate_down; if (bottom[0] == bottom[1]) pd[0] = pd[1] = true; // memory pointer short hand const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; Dtype* bottom_diff[2] = { bottom[0]->mutable_gpu_diff(), bottom[1] ->mutable_gpu_diff() }; for (int i = 0; i < 2; ++i) caffe_gpu_set(bottom[i]->count(), Dtype(0.0), bottom_diff[i]); const Dtype* top_diff = top[0]->gpu_diff(); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int step_top = top[0]->count(1); const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; const int hw = bottom[0]->count(2); // the pointer to the (repeated) derivative Dtype* dzdy; CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&dzdy), batchsz * num_output_ * hw * sizeof(Dtype))); // fft[0] for derivative, fft[1] for data CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) CUDA_CHECK(hipMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype> ))); // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); // (copy and) transpose the derivative if (sum_pool_) { int nthreads = batchlen * hw * num_output_; // copy and transpose the derivative copy_and_transpose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , 
dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } else { // transpose the derivative transpose_batch<Dtype>(batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } // fft the derivative, stored in fftSpace[0] caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[0]); for (int ipoly = 0; ipoly < 2; ++ipoly) if (pd[1 - ipoly]) { // some short hands const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; // first get count and transpose, reuse the dzdy space nthreads = batchlen * step_bottom[ipoly]; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), dzdy); GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], dzdy, hw, C[ipoly], num_output_); // now dzdy is batchlen*hw*num_output_ // fliplr(:, 2:end) nthreads = batchlen * hw * num_output_; fliplr<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, dzdy, batchlen * hw, num_output_); // fft data caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[1]); // elementwise mul nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, fftSpace[0], fftSpace[1], fftSpace[1]); // ifft, again reuse dzdy caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[1], dzdy); // complement projection nthreads = batchlen * hw * C[1 - ipoly]; assign_back<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, dzdy, bottom_diff[1-ipoly] + batchStart * step_bottom[1-ipoly], randh_[1-ipoly].gpu_data(), rands_[1-ipoly].gpu_data(), batchlen, C[1-ipoly], hw, num_output_); } } // temporary space destroy CUDA_CHECK(hipFree(dzdy)); CUDA_CHECK(hipFree(fftSpace[0])); CUDA_CHECK(hipFree(fftSpace[1])); } INSTANTIATE_LAYER_GPU_FUNCS(CompactBilinearLayer); } // namespace caffe
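In the hipified file above, the hipify conversion folded the launch macro name into the NOLINT comments (e.g. "NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators))"), so the explicit macro form is not visible. For reference, a conventional hipLaunchKernelGGL launch of the first GPUCountAndTranspose call in Forward_gpu, with the same grid/block configuration and argument list as the <<<>>> launch in the CUDA version that follows, would look roughly like the fragment below. This is a sketch only; the 0, 0 arguments (dynamic shared memory and stream) are assumed to match the defaults of the original launch.

// Sketch of the conventional HIP form of the GPUCountAndTranspose launch.
// HIP_KERNEL_NAME wraps the templated kernel name; 0, 0 = shared mem, stream.
hipLaunchKernelGGL(HIP_KERNEL_NAME(GPUCountAndTranspose<Dtype>),
                   dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                   nthreads, hh, ss,
                   bottom_data[ipoly] + batchStart * step_bottom[ipoly],
                   space, hw, C[ipoly], num_output_);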
3ea82c076787455fa55ce58b7eb4ea5ca7d0a7b6.cu
#include <algorithm> #include <vector> #include "caffe/layers/compact_bilinear_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { #define CHECK_CUFFT(X) CHECK_EQ((X), CUFFT_SUCCESS) // overloaded functions, to support float and double cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const float *alpha, const float *A, int lda, const float *beta, const float *B, int ldb, float *C, int ldc) { return cublasSgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } cublasStatus_t cublasgeam(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, const double *alpha, const double *A, int lda, const double *beta, const double *B, int ldb, double *C, int ldc) { return cublasDgeam(handle, transa, transb, m, n, alpha, A, lda, beta, B, ldb, C, ldc); } // caffe wrapper of transpose function // dst=src^T, with the src size being M*N template<typename Dtype> void caffe_gpu_transpose(int M, const int N, const Dtype* src, Dtype* dst) { CHECK(src != dst) << "support out of place transpose only"; Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgeam(Caffe::cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, M, N, &alpha, src, N, &beta, dst, M, dst, M), CUBLAS_STATUS_SUCCESS); } template<typename Dtype> void transpose_batch(const int batchlen, const int M, const int N, const Dtype* src, Dtype* dst) { const int step = M * N; for (int ins = 0; ins < batchlen; ++ins) caffe_gpu_transpose(M, N, src + ins * step, dst + ins * step); } // wrappers to deal with atomic add of double __device__ void caffe_atomic_add(float* dst, float val) { atomicAdd(dst, val); } __device__ void caffe_atomic_add(double* address, double val) { // code example in the official document at: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html // #atomic-functions // NOLINT_NEXT_LINE(runtime/int) unsigned long long int* address_as_ull = (unsigned long long int*) address; // NOLINT_NEXT_LINE(runtime/int) unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN // (since NaN != NaN) } while (assumed != old); } // do the getCount and do transpose along the way // should clear top to 0 before call template<typename Dtype> __global__ void GPUCountAndTranspose(const int nthreads, const int * hh, const Dtype * ss, const Dtype* bottom, Dtype* top, const int hw, const int C, const int num_output_) { // input batchlen*C*hw // output batchlen*hw*num_output, the transpose of the original output CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of things you need to do // index is the current INPUT point to be computed int left = index % (C * hw); const int ibatch = index / (C * hw); const int ic = left / hw; const int ihw = left % hw; // get the target location const int target = ibatch * (hw * num_output_) + ihw * num_output_ + hh[ic]; // atomic add only supports float not double caffe_atomic_add(top + target, ss[ic] * bottom[index]); } } // some wrappers around cufftExec // float forward cufftResult cufftExec(cufftHandle plan, const float *idata, CaffeComplex<float> *odata) { return cufftExecR2C(plan, reinterpret_cast<cufftReal*>(const_cast<float*>(idata)), reinterpret_cast<cufftComplex*>(odata)); } // double forward cufftResult cufftExec(cufftHandle plan, const double *idata, CaffeComplex<double> *odata) { return 
cufftExecD2Z(plan, reinterpret_cast<cufftDoubleReal*>(const_cast<double*>(idata)), reinterpret_cast<cufftDoubleComplex*>(odata)); } // float inverse cufftResult cufftExec(cufftHandle plan, const CaffeComplex<float> *idata, float *odata) { return cufftExecC2R(plan, reinterpret_cast<cufftComplex*>( const_cast<CaffeComplex<float>*>(idata)), reinterpret_cast<cufftReal*>(odata)); } // double inverse cufftResult cufftExec(cufftHandle plan, const CaffeComplex<double> *idata, double *odata) { return cufftExecZ2D(plan, reinterpret_cast<cufftDoubleComplex*>( const_cast<CaffeComplex<double>*>(idata)), reinterpret_cast<cufftDoubleReal*>(odata)); } // call cufft to do batch*nffts // cufftReal* src; cufftComplex *output template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_fft(const int batchlen, const int hw, const int nfft, const Dtype* src, CaffeComplex<Dtype>* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_noinv_batch, src, output)); } else { const int step_in = hw * nfft; const int step_out = hw * (floor(1.0 * nfft / 2) + 1); for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_noinv_1, src + step_in * i, output + step_out * i)); } } } template<typename Dtype> void CompactBilinearLayer<Dtype>::caffe_gpu_ifft(const int batchlen, const int hw, const int nfft, const CaffeComplex<Dtype>* src, Dtype* output) { if (batchlen == batchsz) { CHECK_CUFFT(cufftExec(plan_inv_batch, src, output)); } else { const int step_in = hw * (floor(1.0 * nfft / 2) + 1); const int step_out = hw * nfft; for (int i = 0; i < batchlen; ++i) { CHECK_CUFFT( cufftExec(plan_inv_1, src + step_in * i, output + step_out * i)); } } } // Complex multiplication template<typename Dtype> static __device__ __host__ inline CaffeComplex<Dtype> ComplexMul( const CaffeComplex<Dtype> &a, const CaffeComplex<Dtype> &b) { CaffeComplex<Dtype> c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // entrywise multiplication: y[i]=a[i]*b[i] template<typename Dtype> __global__ void complexMul(const int nthreads, const CaffeComplex<Dtype>* a, const CaffeComplex<Dtype>* b, CaffeComplex<Dtype>* y) { CUDA_KERNEL_LOOP(index, nthreads) { // nthreads is the total number of entries y[index] = ComplexMul(a[index], b[index]); } } // dispatchers cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) { return cublasSgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } cublasStatus_t cublasgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const double *alpha, const double *A, int lda, const double *x, int incx, const double *beta, double *y, int incy) { return cublasDgemv(handle, trans, m, n, alpha, A, lda, x, incx, beta, y, incy); } // sum the columns of a M*N source matrix and store it to dst template<typename Dtype> void caffe_sum_cols(const int M, const int N, const Dtype* src, Dtype* dst, Dtype* ones_hw) { Dtype alpha = 1.0, beta = 0.0; CHECK_EQ( cublasgemv(Caffe::cublas_handle(), CUBLAS_OP_T, N, M, &alpha, src, N, ones_hw, 1, &beta, dst, 1), CUBLAS_STATUS_SUCCESS); } template<> void CompactBilinearLayer<float>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(cufftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_R2C, hw)); 
CHECK_CUFFT(cufftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_C2R, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_C2R, hw)); } template<> void CompactBilinearLayer<double>::Initializations(const int hw) { int n = num_output_; // each plan is signatured by (R2C, batchsz) CHECK_CUFFT(cufftPlanMany(&plan_noinv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_D2Z, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_noinv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_D2Z, hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_batch, 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_Z2D, batchsz*hw)); CHECK_CUFFT(cufftPlanMany(&plan_inv_1 , 1, &n, NULL, 0, 0, NULL, 0, 0, CUFFT_Z2D, hw)); } template<typename Dtype> void CompactBilinearLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const int hw = bottom[0]->count(2); if (!plan_init) { // some init commands that will only be executed once plan_init = true; Initializations(hw); // get an all one vector CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&ones_hw), sizeof(Dtype) * hw)); caffe_gpu_set(hw, Dtype(1.0), ones_hw); } // memory pointer short hand Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; const int step_top = top[0]->count(1); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; // temporary space allocation Dtype* batchSpace[2]; CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&batchSpace[ipoly]), batchsz * num_output_ * hw * sizeof(Dtype))); CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype>))); } // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); for (int ipoly = 0; ipoly < 2; ++ipoly) { // some short hands Dtype* space = batchSpace[ipoly]; const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), space); // first get count and transpose nthreads = batchlen * step_bottom[ipoly]; GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], space, hw, C[ipoly], num_output_); // now space is batchlen*hw*num_output // then do FFT caffe_gpu_fft(batchlen, hw, num_output_, space, fftSpace[ipoly]); } // entry-wise multiplication int nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, fftSpace[0], fftSpace[1], fftSpace[0]); // ifft caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[0], batchSpace[0]); // transpose back Dtype* out_target; if (sum_pool_) out_target = batchSpace[1]; else out_target = top_data + batchStart * step_top; transpose_batch(batchlen, hw, num_output_, batchSpace[0], out_target); if (sum_pool_) caffe_sum_cols(batchlen * num_output_, hw, out_target, top_data + batchStart * step_top, ones_hw); } // temporary space destroy for (int ipoly = 0; ipoly < 2; ++ipoly) { CUDA_CHECK(cudaFree(batchSpace[ipoly])); 
CUDA_CHECK(cudaFree(fftSpace[ipoly])); } } template<typename Dtype> __global__ void copy_and_transpose(const int nthreads, const int batch, const int num_output_, const int hw, const Dtype* src, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batch*num_output_ // dst size: batch*hw*num_output_ // index over dst const int left = index % (hw * num_output_); const int ibatch = index / (hw * num_output_); const int ihw = left / num_output_; const int iout = left % num_output_; dst[index] = src[ibatch * num_output_ + iout]; } } // C, dst, hh and ss are complement template<typename Dtype> __global__ void assign_back(const int nthreads, const Dtype* src, Dtype* dst, const int* hh, const Dtype* ss, const int batchlen, const int C, const int hw, const int num_output_) { CUDA_KERNEL_LOOP(index, nthreads) { // src size: batchlen*hw*num_output // dst size: batchlen*C*hw // index over dst const int left = index % (hw * C); const int ibatch = index / (hw * C); const int ic = left / hw; const int ihw = left % hw; dst[index] += ss[ic] * src[(ibatch * hw + ihw) * num_output_ + hh[ic]]; } } template<typename Dtype> __device__ void caffe_gpu_swap(Dtype* a, Dtype* b) { if (a == b) return; Dtype t = *a; *a = *b; *b = t; } template<typename Dtype> __global__ void fliplr(const int nthreads, Dtype* src, const int M, const int N) { CUDA_KERNEL_LOOP(index, nthreads) { // src & dst are M*N // flip left right, loop over src const int m = index / N; const int n = index % N; if ((n <= (N / 2)) && (n >= 1)) caffe_gpu_swap(src + index, src + index - n + N - n); } } template<typename Dtype> void CompactBilinearLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if ((!propagate_down[0]) && (!propagate_down[1])) return; // process the same bottom case // when the two bottoms are the same, one propagate down requires the other vector<bool> pd = propagate_down; if (bottom[0] == bottom[1]) pd[0] = pd[1] = true; // memory pointer short hand const Dtype* bottom_data[2] = { bottom[0]->gpu_data(), bottom[1]->gpu_data() }; Dtype* bottom_diff[2] = { bottom[0]->mutable_gpu_diff(), bottom[1] ->mutable_gpu_diff() }; for (int i = 0; i < 2; ++i) caffe_gpu_set(bottom[i]->count(), Dtype(0.0), bottom_diff[i]); const Dtype* top_diff = top[0]->gpu_diff(); const int step_bottom[2] = { bottom[0]->count(1), bottom[1]->count(1) }; const int step_top = top[0]->count(1); const int C[2] = { bottom[0]->shape(1), bottom[1]->shape(1) }; const int hw = bottom[0]->count(2); // the pointer to the (repeated) derivative Dtype* dzdy; CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&dzdy), batchsz * num_output_ * hw * sizeof(Dtype))); // fft[0] for derivative, fft[1] for data CaffeComplex<Dtype>* fftSpace[2]; for (int ipoly = 0; ipoly < 2; ++ipoly) CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&fftSpace[ipoly]), batchsz * num_complex_out * hw * sizeof(CaffeComplex<Dtype> ))); // batching process each bottom const int totalSamples = bottom[0]->shape(0); for (int batchStart = 0; batchStart < totalSamples; batchStart += batchsz) { const int batchlen = min(batchsz, totalSamples - batchStart); // (copy and) transpose the derivative if (sum_pool_) { int nthreads = batchlen * hw * num_output_; // copy and transpose the derivative copy_and_transpose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } else { // transpose the derivative 
transpose_batch<Dtype>(batchlen, num_output_, hw, top_diff + batchStart * step_top, dzdy); } // fft the derivative, stored in fftSpace[0] caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[0]); for (int ipoly = 0; ipoly < 2; ++ipoly) if (pd[1 - ipoly]) { // some short hands const int * hh = randh_[ipoly].gpu_data(); const Dtype * ss = rands_[ipoly].gpu_data(); int nthreads; // first get count and transpose, reuse the dzdy space nthreads = batchlen * step_bottom[ipoly]; caffe_gpu_set(batchlen * hw * num_output_, Dtype(0.0), dzdy); GPUCountAndTranspose<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, hh, ss, bottom_data[ipoly] + batchStart * step_bottom[ipoly], dzdy, hw, C[ipoly], num_output_); // now dzdy is batchlen*hw*num_output_ // fliplr(:, 2:end) nthreads = batchlen * hw * num_output_; fliplr<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, dzdy, batchlen * hw, num_output_); // fft data caffe_gpu_fft(batchlen, hw, num_output_, dzdy, fftSpace[1]); // elementwise mul nthreads = batchlen * hw * num_complex_out; complexMul<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, fftSpace[0], fftSpace[1], fftSpace[1]); // ifft, again reuse dzdy caffe_gpu_ifft(batchlen, hw, num_output_, fftSpace[1], dzdy); // complement projection nthreads = batchlen * hw * C[1 - ipoly]; assign_back<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>( nthreads, dzdy, bottom_diff[1-ipoly] + batchStart * step_bottom[1-ipoly], randh_[1-ipoly].gpu_data(), rands_[1-ipoly].gpu_data(), batchlen, C[1-ipoly], hw, num_output_); } } // temporary space destroy CUDA_CHECK(cudaFree(dzdy)); CUDA_CHECK(cudaFree(fftSpace[0])); CUDA_CHECK(cudaFree(fftSpace[1])); } INSTANTIATE_LAYER_GPU_FUNCS(CompactBilinearLayer); } // namespace caffe
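The compact bilinear layer above relies on the identity that the circular convolution of two count sketches equals the inverse FFT of the element-wise product of their FFTs, and ComplexMul is ordinary complex multiplication. Below is a small, self-contained host-side check of that multiplication against std::complex; the struct and function names are illustrative and not part of the layer.

// Host-side sketch: verify the layer's complex product (a.x + i*a.y)*(b.x + i*b.y)
// against std::complex multiplication.
#include <cassert>
#include <cmath>
#include <complex>

struct C2 { float x, y; };

static C2 complex_mul(const C2& a, const C2& b) {
  // Same arithmetic as ComplexMul in the layer.
  return { a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x };
}

int main() {
  C2 a{1.5f, -2.0f}, b{0.25f, 3.0f};
  std::complex<float> ra(a.x, a.y), rb(b.x, b.y);
  C2 c = complex_mul(a, b);
  std::complex<float> rc = ra * rb;
  assert(std::abs(c.x - rc.real()) < 1e-6f && std::abs(c.y - rc.imag()) < 1e-6f);
  return 0;
}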
444590fbe85248bf7e210213b3913ca7e4388437.hip
// !!! This is a file automatically generated by hipify!!! #include "MortonUtils.hpp" #include "libmorton/include/morton.h" #include <hip/hip_runtime.h> __host__ __device__ Code_t pointToCode( const float x, const float y, const float z, const float min_coord, const float range) { const uint32_t bitscale = 0xFFFFFFFFu >> (32 - (CODE_LEN / 3)); const uint32_t x_coord = static_cast<uint32_t>(bitscale * ((x - min_coord) / range)); const uint32_t y_coord = static_cast<uint32_t>(bitscale * ((y - min_coord) / range)); const uint32_t z_coord = static_cast<uint32_t>(bitscale * ((z - min_coord) / range)); // printf("Point %lu = (%u, %u, %u)\n", (unsigned long)idx, (unsigned int)x_coord, (unsigned int)y_coord, (unsigned int)z_coord); return morton3D_64_encode(x_coord, y_coord, z_coord); } __host__ __device__ Point codeToPoint(const Code_t code, const float min_coord, const float range) { const uint32_t bitscale = 0xFFFFFFFFu >> (32 - (CODE_LEN / 3)); uint32_t dec_raw_x, dec_raw_y, dec_raw_z; morton3D_64_decode(code, dec_raw_x, dec_raw_y, dec_raw_z); float dec_x = ((float)dec_raw_x / bitscale) * range + min_coord; float dec_y = ((float)dec_raw_y / bitscale) * range + min_coord; float dec_z = ((float)dec_raw_z / bitscale) * range + min_coord; return Point(dec_x, dec_y, dec_z); }
444590fbe85248bf7e210213b3913ca7e4388437.cu
#include "MortonUtils.hpp" #include "libmorton/include/morton.h" #include <cuda.h> __host__ __device__ Code_t pointToCode( const float x, const float y, const float z, const float min_coord, const float range) { const uint32_t bitscale = 0xFFFFFFFFu >> (32 - (CODE_LEN / 3)); const uint32_t x_coord = static_cast<uint32_t>(bitscale * ((x - min_coord) / range)); const uint32_t y_coord = static_cast<uint32_t>(bitscale * ((y - min_coord) / range)); const uint32_t z_coord = static_cast<uint32_t>(bitscale * ((z - min_coord) / range)); // printf("Point %lu = (%u, %u, %u)\n", (unsigned long)idx, (unsigned int)x_coord, (unsigned int)y_coord, (unsigned int)z_coord); return morton3D_64_encode(x_coord, y_coord, z_coord); } __host__ __device__ Point codeToPoint(const Code_t code, const float min_coord, const float range) { const uint32_t bitscale = 0xFFFFFFFFu >> (32 - (CODE_LEN / 3)); uint32_t dec_raw_x, dec_raw_y, dec_raw_z; morton3D_64_decode(code, dec_raw_x, dec_raw_y, dec_raw_z); float dec_x = ((float)dec_raw_x / bitscale) * range + min_coord; float dec_y = ((float)dec_raw_y / bitscale) * range + min_coord; float dec_z = ((float)dec_raw_z / bitscale) * range + min_coord; return Point(dec_x, dec_y, dec_z); }
089e00082e63b831167e4a3b1a835ce28421b9e0.hip
// !!! This is a file automatically generated by hipify!!! /*---------------------------------------------------------------------- CUDA C extension for Python This extension module provides auxiliary functionality for list-mode data processing, generating look-up tables for image reconstruction. author: Pawel Markiewicz Copyrights: 2018 ----------------------------------------------------------------------*/ #define PY_SSIZE_T_CLEAN #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION #include "Python.h" #include "auxmath.h" #include "def.h" #include "norm.h" #include "numpy/arrayobject.h" #include "pycuvec.cuh" #include "scanner_0.h" #include <stdlib.h> //=== START PYTHON INIT === //--- Available functions static PyObject *mmr_norm(PyObject *self, PyObject *args); static PyObject *mmr_span11LUT(PyObject *self, PyObject *args); static PyObject *mmr_pgaps(PyObject *self, PyObject *args); static PyObject *mmr_rgaps(PyObject *self, PyObject *args); static PyObject *aux_varon(PyObject *self, PyObject *args); //--- //> Module Method Table static PyMethodDef mmr_auxe_methods[] = { {"norm", mmr_norm, METH_VARARGS, "Create 3D normalisation sinograms from provided normalisation components."}, {"s1s11", mmr_span11LUT, METH_VARARGS, "Create span-1 to span-11 look up table."}, {"pgaps", mmr_pgaps, METH_VARARGS, "Create span-11 Siemens compatible sinograms by inserting gaps into the GPU-optimised " "sinograms in span-11."}, {"rgaps", mmr_rgaps, METH_VARARGS, "Create span-11 GPU-optimised sinograms by removing the gaps in Siemens-compatible sinograms " "in span-11"}, {"varon", aux_varon, METH_VARARGS, "Calculate variance online for the provided vector."}, {NULL, NULL, 0, NULL} // Sentinel }; //> Module Definition Structure static struct PyModuleDef mmr_auxe_module = { PyModuleDef_HEAD_INIT, //> name of module "mmr_auxe", //> module documentation, may be NULL "Initialisation and basic processing routines for the Siemens Biograph mMR.", //> the module keeps state in global variables. -1, mmr_auxe_methods}; //> Initialization function PyMODINIT_FUNC PyInit_mmr_auxe(void) { Py_Initialize(); //> load NumPy functionality import_array(); return PyModule_Create(&mmr_auxe_module); } //=== END PYTHON INIT === //============================================================================== //============================================================================== // N O R M A L I S A T I O N (component based) //------------------------------------------------------------------------------ static PyObject *mmr_norm(PyObject *self, PyObject *args) { // Structure of constants Cnst Cnt; // Dictionary of scanner constants PyObject *o_mmrcnst; // structure of norm C arrays (defined in norm.h). NormCmp normc; // structure of axial LUTs in C arrays (defined in norm.h). axialLUT axLUT; // Output norm sino PyObject *o_sino = NULL; // normalisation component dictionary. PyObject *o_norm_cmp; // axial LUT dicionary. contains such LUTs: li2rno, li2sn, li2nos. PyObject *o_axLUT; // 2D sino index LUT (dead bisn are out). PyObject *o_aw2ali = NULL; // singles buckets for dead time correction PyObject *o_bckts = NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOOO", &o_sino, &o_norm_cmp, &o_bckts, &o_axLUT, &o_aw2ali, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as numpy arrays. 
*/ // norm components: PyObject *pd_geo = PyDict_GetItemString(o_norm_cmp, "geo"); PyObject *pd_cinf = PyDict_GetItemString(o_norm_cmp, "cinf"); PyObject *pd_ceff = PyDict_GetItemString(o_norm_cmp, "ceff"); PyObject *pd_axe1 = PyDict_GetItemString(o_norm_cmp, "axe1"); PyObject *pd_dtp = PyDict_GetItemString(o_norm_cmp, "dtp"); PyObject *pd_dtnp = PyDict_GetItemString(o_norm_cmp, "dtnp"); PyObject *pd_dtc = PyDict_GetItemString(o_norm_cmp, "dtc"); PyObject *pd_axe2 = PyDict_GetItemString(o_norm_cmp, "axe2"); PyObject *pd_axf1 = PyDict_GetItemString(o_norm_cmp, "axf1"); // axial LUTs: PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno"); PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn"); PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos"); PyObject *pd_sn1sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11"); PyObject *pd_sn1rno = PyDict_GetItemString(o_axLUT, "sn1_rno"); PyObject *pd_sn1sn11no = PyDict_GetItemString(o_axLUT, "sn1_sn11no"); PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_span); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // get the output sino PyArrayObject *p_sino = NULL; p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); //-- get the arrays from the dictionaries // norm components PyArrayObject *p_geo = NULL; p_geo = (PyArrayObject *)PyArray_FROM_OTF(pd_geo, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_cinf = NULL; p_cinf = (PyArrayObject *)PyArray_FROM_OTF(pd_cinf, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_ceff = NULL; p_ceff = (PyArrayObject *)PyArray_FROM_OTF(pd_ceff, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axe1 = NULL; p_axe1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtp = NULL; p_dtp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtnp = NULL; p_dtnp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtnp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtc = NULL; p_dtc = (PyArrayObject *)PyArray_FROM_OTF(pd_dtc, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axe2 = NULL; p_axe2 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe2, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axf1 = NULL; p_axf1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axf1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); // then axLUTs PyArrayObject *p_li2rno = NULL; p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_li2sn = NULL; p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_li2nos = NULL; p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1sn11 = NULL; p_sn1sn11 = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11, NPY_INT16, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1rno = NULL; p_sn1rno = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1rno, NPY_INT16, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1sn11no = NULL; p_sn1sn11no = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11no, NPY_INT8, NPY_ARRAY_IN_ARRAY); // 2D sino index LUT: PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(o_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); // single bucktes: PyArrayObject *p_bckts = NULL; p_bckts = (PyArrayObject *)PyArray_FROM_OTF(o_bckts, NPY_INT32, NPY_ARRAY_IN_ARRAY); //-- /* If that 
didn't work, throw an exception. */ if (p_geo == NULL || p_cinf == NULL || p_ceff == NULL || p_axe1 == NULL || p_dtp == NULL || p_dtnp == NULL || p_dtc == NULL || p_axe2 == NULL || p_axf1 == NULL || p_li2rno == NULL || p_li2sn == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_sn1sn11 == NULL || p_sn1rno == NULL || p_sn1sn11no == NULL || p_sino == NULL) { Py_XDECREF(p_geo); Py_XDECREF(p_cinf); Py_XDECREF(p_ceff); Py_XDECREF(p_axe1); Py_XDECREF(p_dtp); Py_XDECREF(p_dtnp); Py_XDECREF(p_dtc); Py_XDECREF(p_axe2); Py_XDECREF(p_axf1); // axLUTs Py_XDECREF(p_li2rno); Py_XDECREF(p_li2sn); Py_XDECREF(p_li2nos); Py_XDECREF(p_sn1sn11); Py_XDECREF(p_sn1rno); Py_XDECREF(p_sn1sn11no); // 2D sino LUT Py_XDECREF(p_aw2ali); // singles buckets Py_XDECREF(p_bckts); // output sino PyArray_DiscardWritebackIfCopy(p_sino); Py_XDECREF(p_sino); return NULL; } //-- get the pointers to the data as C-types // norm components normc.geo = (float *)PyArray_DATA(p_geo); normc.cinf = (float *)PyArray_DATA(p_cinf); normc.ceff = (float *)PyArray_DATA(p_ceff); normc.axe1 = (float *)PyArray_DATA(p_axe1); normc.dtp = (float *)PyArray_DATA(p_dtp); normc.dtnp = (float *)PyArray_DATA(p_dtnp); normc.dtc = (float *)PyArray_DATA(p_dtc); normc.axe2 = (float *)PyArray_DATA(p_axe2); normc.axf1 = (float *)PyArray_DATA(p_axf1); // axLUTs axLUT.li2rno = (int *)PyArray_DATA(p_li2rno); axLUT.li2sn = (int *)PyArray_DATA(p_li2sn); axLUT.li2nos = (int *)PyArray_DATA(p_li2nos); axLUT.sn1_sn11 = (short *)PyArray_DATA(p_sn1sn11); axLUT.sn1_rno = (short *)PyArray_DATA(p_sn1rno); axLUT.sn1_sn11no = (char *)PyArray_DATA(p_sn1sn11no); // 2D sino index LUT int *aw2ali = (int *)PyArray_DATA(p_aw2ali); // singles bucktes int *bckts = (int *)PyArray_DATA(p_bckts); //--- Array size int Naw = (int)PyArray_DIM(p_aw2ali, 0); if (AW != Naw) printf("\ne> number of active bins is inconsitent !!! <<------------------<<<<<\n"); // output sino float *sino = (float *)PyArray_DATA(p_sino); // norm components normc.ngeo[0] = (int)PyArray_DIM(p_geo, 0); normc.ngeo[1] = (int)PyArray_DIM(p_geo, 1); normc.ncinf[0] = (int)PyArray_DIM(p_cinf, 0); normc.ncinf[1] = (int)PyArray_DIM(p_cinf, 1); normc.nceff[0] = (int)PyArray_DIM(p_ceff, 0); normc.nceff[1] = (int)PyArray_DIM(p_ceff, 1); normc.naxe = (int)PyArray_DIM(p_axe1, 0); normc.nrdt = (int)PyArray_DIM(p_dtp, 0); normc.ncdt = (int)PyArray_DIM(p_dtc, 0); // axial LUTs: axLUT.Nli2rno[0] = (int)PyArray_DIM(p_li2rno, 0); axLUT.Nli2rno[1] = (int)PyArray_DIM(p_li2rno, 1); axLUT.Nli2sn[0] = (int)PyArray_DIM(p_li2sn, 0); axLUT.Nli2sn[1] = (int)PyArray_DIM(p_li2sn, 1); axLUT.Nli2nos = (int)PyArray_DIM(p_li2nos, 0); // sets the device on which to calculate HANDLE_ERROR(hipSetDevice(Cnt.DEVID)); //<><><><><><><><><><> Call the CUDA stuff now norm_from_components(sino, normc, axLUT, aw2ali, bckts, Cnt); //<><><><><><><><><><> //-- Clear up // norm components Py_DECREF(p_geo); Py_DECREF(p_cinf); Py_DECREF(p_ceff); Py_DECREF(p_axe1); Py_DECREF(p_dtp); Py_DECREF(p_dtnp); Py_DECREF(p_dtc); Py_DECREF(p_axe2); // axLUT Py_DECREF(p_li2rno); Py_DECREF(p_li2sn); Py_DECREF(p_li2nos); // 2D sino index LUT Py_DECREF(p_aw2ali); // singles buckets Py_DECREF(p_bckts); // output sino PyArray_ResolveWritebackIfCopy(p_sino); Py_DECREF(p_sino); Py_INCREF(Py_None); return Py_None; } //==================================================================================================== static PyObject *mmr_pgaps(PyObject *self, PyObject *args) { // output sino PyObject *o_sino; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). 
PyObject *o_txLUT; // Dictionary of scanner constants PyObject *o_mmrcnst; // GPU input sino in span-11 PyObject *o_sng; // Structure of constants Cnst Cnt; int sino_no; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOi", &o_sino, &o_sng, &o_txLUT, &o_mmrcnst, &sino_no)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as... */ PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyLong_AsLong(pd_A); PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyLong_AsLong(pd_W); PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_SPN); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT"); PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END"); Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt); Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend); // GPU 2D linear sino index into Siemens sino index LUT PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali"); // GPU input sino and the above 2D LUT PyArrayObject *p_sng = NULL; p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); // output sino PyArrayObject *p_sino = NULL; p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); if (p_sng == NULL || p_aw2ali == NULL || p_sino == NULL) { Py_XDECREF(p_aw2ali); Py_XDECREF(p_sng); PyArray_DiscardWritebackIfCopy(p_sino); Py_XDECREF(p_sino); } int *aw2ali = (int *)PyArray_DATA(p_aw2ali); float *sng = (float *)PyArray_DATA(p_sng); // output sino float *sino = (float *)PyArray_DATA(p_sino); // sets the device on which to calculate HANDLE_ERROR(hipSetDevice(Cnt.DEVID)); //<><><><><><><><><><><><><><><><><><><><><><> // Run the conversion to sinos with gaps put_gaps(sino, sng, aw2ali, sino_no, Cnt); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up Py_DECREF(p_aw2ali); Py_DECREF(p_sng); PyArray_ResolveWritebackIfCopy(p_sino); Py_DECREF(p_sino); Py_INCREF(Py_None); return Py_None; } //==================================================================================================== static PyObject *mmr_rgaps(PyObject *self, PyObject *args) { PyCuVec<float> *o_sng = NULL; // output sino with gaps removed PyCuVec<float> *o_sino = NULL; // input sino to be reformated with gaps removed PyObject *o_txLUT; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). PyObject *o_mmrcnst; // Dictionary of scanner constants Cnst Cnt; // Structure of constants if (!PyArg_ParseTuple(args, "O&O&OO", &asPyCuVec_f, &o_sng, &asPyCuVec_f, &o_sino, &o_txLUT, &o_mmrcnst)) return NULL; /* Interpret the input objects as... 
PyLong_AsLong*/ PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1); PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyLong_AsLong(pd_A); PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyLong_AsLong(pd_W); PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_SPN); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // GPU 2D linear sino index into Siemens sino index LUT PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali"); // input sino and the above 2D LUT PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); if (p_aw2ali == NULL) { Py_XDECREF(p_aw2ali); } int *aw2ali = (int *)PyArray_DATA(p_aw2ali); // number of sinogram from the shape of the sino (can be any number especially when using reduced // ring number) int snno = o_sino->shape[0]; //<><><><><><><><><><><><><><><><><><><><><><> HANDLE_ERROR(hipSetDevice(Cnt.DEVID)); // Run the conversion to GPU sinos remove_gaps(o_sng->vec.data(), o_sino->vec.data(), snno, aw2ali, Cnt); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up Py_DECREF(p_aw2ali); Py_INCREF(Py_None); return Py_None; } void free_capsule(PyObject *capsule) { void *data = PyCapsule_GetPointer(capsule, NULL); free(data); } //==================================================================================================== static PyObject *mmr_span11LUT(PyObject *self, PyObject *args) { // Dictionary of scanner constants PyObject *o_mmrcnst; // Structure of constants Cnst Cnt; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "O", &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as... 
*/ PyObject *pd_Naw = PyDict_GetItemString(o_mmrcnst, "Naw"); Cnt.aw = (int)PyLong_AsLong(pd_Naw); PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1); PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG"); Cnt.NRNG = (int)PyLong_AsLong(pd_NRNG); span11LUT span11 = span1_span11(Cnt); npy_intp dims[2]; dims[0] = Cnt.NSN1; PyArrayObject *s1s11_out = (PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT16, span11.li2s11); PyObject *capsule = PyCapsule_New(span11.li2s11, NULL, free_capsule); PyArray_SetBaseObject(s1s11_out, capsule); dims[0] = Cnt.NSN11; PyArrayObject *s1nos_out = (PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT8, span11.NSinos); capsule = PyCapsule_New(span11.NSinos, NULL, free_capsule); PyArray_SetBaseObject(s1nos_out, capsule); PyObject *o_out = PyTuple_New(2); PyTuple_SetItem(o_out, 0, PyArray_Return(s1s11_out)); PyTuple_SetItem(o_out, 1, PyArray_Return(s1nos_out)); return o_out; } //==================================================================================================== static PyObject *aux_varon(PyObject *self, PyObject *args) { // M1 (mean) vector PyObject *o_m1; // M2 (variance) vector PyObject *o_m2; // input of instance data X PyObject *o_x; // Dictionary of scanner constants PyObject *o_mmrcnst; // Structure of constants Cnst Cnt; // realisation number int b; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOiO", &o_m1, &o_m2, &o_x, &b, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // input sino and the above 2D LUT PyArrayObject *p_m1 = NULL; p_m1 = (PyArrayObject *)PyArray_FROM_OTF(o_m1, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); PyArrayObject *p_m2 = NULL; p_m2 = (PyArrayObject *)PyArray_FROM_OTF(o_m2, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); PyArrayObject *p_x = NULL; p_x = (PyArrayObject *)PyArray_FROM_OTF(o_x, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); if (p_m1 == NULL || p_m2 == NULL || p_x == NULL) { PyArray_DiscardWritebackIfCopy(p_m1); PyArray_DiscardWritebackIfCopy(p_m2); Py_XDECREF(p_m1); Py_XDECREF(p_m2); Py_XDECREF(p_x); } float *m1 = (float *)PyArray_DATA(p_m1); float *m2 = (float *)PyArray_DATA(p_m2); float *x = (float *)PyArray_DATA(p_x); int ndim = PyArray_NDIM(p_x); size_t nele = 1; for (int i = 0; i < ndim; i++) { nele *= PyArray_DIM(p_x, i); } printf("i> number of elements in data array: %lu\n", nele); // sets the device on which to calculate HANDLE_ERROR(hipSetDevice(Cnt.DEVID)); //<><><><><><><><><><><><><><><><><><><><><><> // Update variance online (M1, M2) using data instance X var_online(m1, m2, x, b, nele); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up PyArray_ResolveWritebackIfCopy(p_m1); PyArray_ResolveWritebackIfCopy(p_m2); Py_DECREF(p_m1); Py_DECREF(p_m2); Py_DECREF(p_x); Py_INCREF(Py_None); return Py_None; }
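The aux_varon wrapper above only marshals the M1/M2 buffers and the data instance X before calling var_online, whose implementation lives with auxmath.h and is not shown in this file. For orientation, a per-element Welford-style update is the usual way such an online mean/variance accumulator is written; the sketch below is an assumption about that pattern (hypothetical kernel name and launch configuration), not the actual var_online code:

// Hypothetical sketch of a Welford-style online update for realisation b;
// the real var_online in auxmath.h may use a different formula or normalisation.
__global__ void var_online_sketch(float *m1, float *m2, const float *x, int b, size_t n) {
  size_t i = blockIdx.x * (size_t)blockDim.x + threadIdx.x;
  if (i >= n) return;
  float xi = x[i];
  float d = xi - m1[i];       // deviation from the running mean
  m1[i] += d / (float)b;      // mean after b realisations
  m2[i] += d * (xi - m1[i]);  // accumulated sum of squared deviations
}
// e.g. var_online_sketch<<<(n + 255) / 256, 256>>>(d_m1, d_m2, d_x, b, n);
// the unbiased variance after B realisations would then be m2[i] / (B - 1)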
089e00082e63b831167e4a3b1a835ce28421b9e0.cu
/*---------------------------------------------------------------------- CUDA C extension for Python This extension module provides auxiliary functionality for list-mode data processing, generating look-up tables for image reconstruction. author: Pawel Markiewicz Copyrights: 2018 ----------------------------------------------------------------------*/ #define PY_SSIZE_T_CLEAN #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION // NPY_API_VERSION #include "Python.h" #include "auxmath.h" #include "def.h" #include "norm.h" #include "numpy/arrayobject.h" #include "pycuvec.cuh" #include "scanner_0.h" #include <stdlib.h> //=== START PYTHON INIT === //--- Available functions static PyObject *mmr_norm(PyObject *self, PyObject *args); static PyObject *mmr_span11LUT(PyObject *self, PyObject *args); static PyObject *mmr_pgaps(PyObject *self, PyObject *args); static PyObject *mmr_rgaps(PyObject *self, PyObject *args); static PyObject *aux_varon(PyObject *self, PyObject *args); //--- //> Module Method Table static PyMethodDef mmr_auxe_methods[] = { {"norm", mmr_norm, METH_VARARGS, "Create 3D normalisation sinograms from provided normalisation components."}, {"s1s11", mmr_span11LUT, METH_VARARGS, "Create span-1 to span-11 look up table."}, {"pgaps", mmr_pgaps, METH_VARARGS, "Create span-11 Siemens compatible sinograms by inserting gaps into the GPU-optimised " "sinograms in span-11."}, {"rgaps", mmr_rgaps, METH_VARARGS, "Create span-11 GPU-optimised sinograms by removing the gaps in Siemens-compatible sinograms " "in span-11"}, {"varon", aux_varon, METH_VARARGS, "Calculate variance online for the provided vector."}, {NULL, NULL, 0, NULL} // Sentinel }; //> Module Definition Structure static struct PyModuleDef mmr_auxe_module = { PyModuleDef_HEAD_INIT, //> name of module "mmr_auxe", //> module documentation, may be NULL "Initialisation and basic processing routines for the Siemens Biograph mMR.", //> the module keeps state in global variables. -1, mmr_auxe_methods}; //> Initialization function PyMODINIT_FUNC PyInit_mmr_auxe(void) { Py_Initialize(); //> load NumPy functionality import_array(); return PyModule_Create(&mmr_auxe_module); } //=== END PYTHON INIT === //============================================================================== //============================================================================== // N O R M A L I S A T I O N (component based) //------------------------------------------------------------------------------ static PyObject *mmr_norm(PyObject *self, PyObject *args) { // Structure of constants Cnst Cnt; // Dictionary of scanner constants PyObject *o_mmrcnst; // structure of norm C arrays (defined in norm.h). NormCmp normc; // structure of axial LUTs in C arrays (defined in norm.h). axialLUT axLUT; // Output norm sino PyObject *o_sino = NULL; // normalisation component dictionary. PyObject *o_norm_cmp; // axial LUT dicionary. contains such LUTs: li2rno, li2sn, li2nos. PyObject *o_axLUT; // 2D sino index LUT (dead bisn are out). PyObject *o_aw2ali = NULL; // singles buckets for dead time correction PyObject *o_bckts = NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOOO", &o_sino, &o_norm_cmp, &o_bckts, &o_axLUT, &o_aw2ali, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as numpy arrays. 
*/ // norm components: PyObject *pd_geo = PyDict_GetItemString(o_norm_cmp, "geo"); PyObject *pd_cinf = PyDict_GetItemString(o_norm_cmp, "cinf"); PyObject *pd_ceff = PyDict_GetItemString(o_norm_cmp, "ceff"); PyObject *pd_axe1 = PyDict_GetItemString(o_norm_cmp, "axe1"); PyObject *pd_dtp = PyDict_GetItemString(o_norm_cmp, "dtp"); PyObject *pd_dtnp = PyDict_GetItemString(o_norm_cmp, "dtnp"); PyObject *pd_dtc = PyDict_GetItemString(o_norm_cmp, "dtc"); PyObject *pd_axe2 = PyDict_GetItemString(o_norm_cmp, "axe2"); PyObject *pd_axf1 = PyDict_GetItemString(o_norm_cmp, "axf1"); // axial LUTs: PyObject *pd_li2rno = PyDict_GetItemString(o_axLUT, "li2rno"); PyObject *pd_li2sn = PyDict_GetItemString(o_axLUT, "li2sn"); PyObject *pd_li2nos = PyDict_GetItemString(o_axLUT, "li2nos"); PyObject *pd_sn1sn11 = PyDict_GetItemString(o_axLUT, "sn1_sn11"); PyObject *pd_sn1rno = PyDict_GetItemString(o_axLUT, "sn1_rno"); PyObject *pd_sn1sn11no = PyDict_GetItemString(o_axLUT, "sn1_sn11no"); PyObject *pd_span = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_span); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // get the output sino PyArrayObject *p_sino = NULL; p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); //-- get the arrays from the dictionaries // norm components PyArrayObject *p_geo = NULL; p_geo = (PyArrayObject *)PyArray_FROM_OTF(pd_geo, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_cinf = NULL; p_cinf = (PyArrayObject *)PyArray_FROM_OTF(pd_cinf, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_ceff = NULL; p_ceff = (PyArrayObject *)PyArray_FROM_OTF(pd_ceff, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axe1 = NULL; p_axe1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtp = NULL; p_dtp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtnp = NULL; p_dtnp = (PyArrayObject *)PyArray_FROM_OTF(pd_dtnp, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_dtc = NULL; p_dtc = (PyArrayObject *)PyArray_FROM_OTF(pd_dtc, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axe2 = NULL; p_axe2 = (PyArrayObject *)PyArray_FROM_OTF(pd_axe2, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_axf1 = NULL; p_axf1 = (PyArrayObject *)PyArray_FROM_OTF(pd_axf1, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); // then axLUTs PyArrayObject *p_li2rno = NULL; p_li2rno = (PyArrayObject *)PyArray_FROM_OTF(pd_li2rno, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_li2sn = NULL; p_li2sn = (PyArrayObject *)PyArray_FROM_OTF(pd_li2sn, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_li2nos = NULL; p_li2nos = (PyArrayObject *)PyArray_FROM_OTF(pd_li2nos, NPY_INT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1sn11 = NULL; p_sn1sn11 = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11, NPY_INT16, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1rno = NULL; p_sn1rno = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1rno, NPY_INT16, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_sn1sn11no = NULL; p_sn1sn11no = (PyArrayObject *)PyArray_FROM_OTF(pd_sn1sn11no, NPY_INT8, NPY_ARRAY_IN_ARRAY); // 2D sino index LUT: PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(o_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); // single bucktes: PyArrayObject *p_bckts = NULL; p_bckts = (PyArrayObject *)PyArray_FROM_OTF(o_bckts, NPY_INT32, NPY_ARRAY_IN_ARRAY); //-- /* If that 
didn't work, throw an exception. */ if (p_geo == NULL || p_cinf == NULL || p_ceff == NULL || p_axe1 == NULL || p_dtp == NULL || p_dtnp == NULL || p_dtc == NULL || p_axe2 == NULL || p_axf1 == NULL || p_li2rno == NULL || p_li2sn == NULL || p_li2nos == NULL || p_aw2ali == NULL || p_sn1sn11 == NULL || p_sn1rno == NULL || p_sn1sn11no == NULL || p_sino == NULL) { Py_XDECREF(p_geo); Py_XDECREF(p_cinf); Py_XDECREF(p_ceff); Py_XDECREF(p_axe1); Py_XDECREF(p_dtp); Py_XDECREF(p_dtnp); Py_XDECREF(p_dtc); Py_XDECREF(p_axe2); Py_XDECREF(p_axf1); // axLUTs Py_XDECREF(p_li2rno); Py_XDECREF(p_li2sn); Py_XDECREF(p_li2nos); Py_XDECREF(p_sn1sn11); Py_XDECREF(p_sn1rno); Py_XDECREF(p_sn1sn11no); // 2D sino LUT Py_XDECREF(p_aw2ali); // singles buckets Py_XDECREF(p_bckts); // output sino PyArray_DiscardWritebackIfCopy(p_sino); Py_XDECREF(p_sino); return NULL; } //-- get the pointers to the data as C-types // norm components normc.geo = (float *)PyArray_DATA(p_geo); normc.cinf = (float *)PyArray_DATA(p_cinf); normc.ceff = (float *)PyArray_DATA(p_ceff); normc.axe1 = (float *)PyArray_DATA(p_axe1); normc.dtp = (float *)PyArray_DATA(p_dtp); normc.dtnp = (float *)PyArray_DATA(p_dtnp); normc.dtc = (float *)PyArray_DATA(p_dtc); normc.axe2 = (float *)PyArray_DATA(p_axe2); normc.axf1 = (float *)PyArray_DATA(p_axf1); // axLUTs axLUT.li2rno = (int *)PyArray_DATA(p_li2rno); axLUT.li2sn = (int *)PyArray_DATA(p_li2sn); axLUT.li2nos = (int *)PyArray_DATA(p_li2nos); axLUT.sn1_sn11 = (short *)PyArray_DATA(p_sn1sn11); axLUT.sn1_rno = (short *)PyArray_DATA(p_sn1rno); axLUT.sn1_sn11no = (char *)PyArray_DATA(p_sn1sn11no); // 2D sino index LUT int *aw2ali = (int *)PyArray_DATA(p_aw2ali); // singles bucktes int *bckts = (int *)PyArray_DATA(p_bckts); //--- Array size int Naw = (int)PyArray_DIM(p_aw2ali, 0); if (AW != Naw) printf("\ne> number of active bins is inconsitent !!! <<------------------<<<<<\n"); // output sino float *sino = (float *)PyArray_DATA(p_sino); // norm components normc.ngeo[0] = (int)PyArray_DIM(p_geo, 0); normc.ngeo[1] = (int)PyArray_DIM(p_geo, 1); normc.ncinf[0] = (int)PyArray_DIM(p_cinf, 0); normc.ncinf[1] = (int)PyArray_DIM(p_cinf, 1); normc.nceff[0] = (int)PyArray_DIM(p_ceff, 0); normc.nceff[1] = (int)PyArray_DIM(p_ceff, 1); normc.naxe = (int)PyArray_DIM(p_axe1, 0); normc.nrdt = (int)PyArray_DIM(p_dtp, 0); normc.ncdt = (int)PyArray_DIM(p_dtc, 0); // axial LUTs: axLUT.Nli2rno[0] = (int)PyArray_DIM(p_li2rno, 0); axLUT.Nli2rno[1] = (int)PyArray_DIM(p_li2rno, 1); axLUT.Nli2sn[0] = (int)PyArray_DIM(p_li2sn, 0); axLUT.Nli2sn[1] = (int)PyArray_DIM(p_li2sn, 1); axLUT.Nli2nos = (int)PyArray_DIM(p_li2nos, 0); // sets the device on which to calculate HANDLE_ERROR(cudaSetDevice(Cnt.DEVID)); //<><><><><><><><><><> Call the CUDA stuff now norm_from_components(sino, normc, axLUT, aw2ali, bckts, Cnt); //<><><><><><><><><><> //-- Clear up // norm components Py_DECREF(p_geo); Py_DECREF(p_cinf); Py_DECREF(p_ceff); Py_DECREF(p_axe1); Py_DECREF(p_dtp); Py_DECREF(p_dtnp); Py_DECREF(p_dtc); Py_DECREF(p_axe2); // axLUT Py_DECREF(p_li2rno); Py_DECREF(p_li2sn); Py_DECREF(p_li2nos); // 2D sino index LUT Py_DECREF(p_aw2ali); // singles buckets Py_DECREF(p_bckts); // output sino PyArray_ResolveWritebackIfCopy(p_sino); Py_DECREF(p_sino); Py_INCREF(Py_None); return Py_None; } //==================================================================================================== static PyObject *mmr_pgaps(PyObject *self, PyObject *args) { // output sino PyObject *o_sino; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). 
PyObject *o_txLUT; // Dictionary of scanner constants PyObject *o_mmrcnst; // GPU input sino in span-11 PyObject *o_sng; // Structure of constants Cnst Cnt; int sino_no; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOOi", &o_sino, &o_sng, &o_txLUT, &o_mmrcnst, &sino_no)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as... */ PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyLong_AsLong(pd_A); PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyLong_AsLong(pd_W); PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_SPN); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); PyObject *pd_rngstrt = PyDict_GetItemString(o_mmrcnst, "RNG_STRT"); PyObject *pd_rngend = PyDict_GetItemString(o_mmrcnst, "RNG_END"); Cnt.RNG_STRT = (char)PyLong_AsLong(pd_rngstrt); Cnt.RNG_END = (char)PyLong_AsLong(pd_rngend); // GPU 2D linear sino index into Siemens sino index LUT PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali"); // GPU input sino and the above 2D LUT PyArrayObject *p_sng = NULL; p_sng = (PyArrayObject *)PyArray_FROM_OTF(o_sng, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); // output sino PyArrayObject *p_sino = NULL; p_sino = (PyArrayObject *)PyArray_FROM_OTF(o_sino, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); if (p_sng == NULL || p_aw2ali == NULL || p_sino == NULL) { Py_XDECREF(p_aw2ali); Py_XDECREF(p_sng); PyArray_DiscardWritebackIfCopy(p_sino); Py_XDECREF(p_sino); } int *aw2ali = (int *)PyArray_DATA(p_aw2ali); float *sng = (float *)PyArray_DATA(p_sng); // output sino float *sino = (float *)PyArray_DATA(p_sino); // sets the device on which to calculate HANDLE_ERROR(cudaSetDevice(Cnt.DEVID)); //<><><><><><><><><><><><><><><><><><><><><><> // Run the conversion to sinos with gaps put_gaps(sino, sng, aw2ali, sino_no, Cnt); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up Py_DECREF(p_aw2ali); Py_DECREF(p_sng); PyArray_ResolveWritebackIfCopy(p_sino); Py_DECREF(p_sino); Py_INCREF(Py_None); return Py_None; } //==================================================================================================== static PyObject *mmr_rgaps(PyObject *self, PyObject *args) { PyCuVec<float> *o_sng = NULL; // output sino with gaps removed PyCuVec<float> *o_sino = NULL; // input sino to be reformated with gaps removed PyObject *o_txLUT; // transaxial LUT dictionary (e.g., 2D sino where dead bins are out). PyObject *o_mmrcnst; // Dictionary of scanner constants Cnst Cnt; // Structure of constants if (!PyArg_ParseTuple(args, "O&O&OO", &asPyCuVec_f, &o_sng, &asPyCuVec_f, &o_sino, &o_txLUT, &o_mmrcnst)) return NULL; /* Interpret the input objects as... 
PyLong_AsLong*/ PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1); PyObject *pd_A = PyDict_GetItemString(o_mmrcnst, "NSANGLES"); Cnt.A = (int)PyLong_AsLong(pd_A); PyObject *pd_W = PyDict_GetItemString(o_mmrcnst, "NSBINS"); Cnt.W = (int)PyLong_AsLong(pd_W); PyObject *pd_SPN = PyDict_GetItemString(o_mmrcnst, "SPN"); Cnt.SPN = (int)PyLong_AsLong(pd_SPN); PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // GPU 2D linear sino index into Siemens sino index LUT PyObject *pd_aw2ali = PyDict_GetItemString(o_txLUT, "aw2ali"); // input sino and the above 2D LUT PyArrayObject *p_aw2ali = NULL; p_aw2ali = (PyArrayObject *)PyArray_FROM_OTF(pd_aw2ali, NPY_INT32, NPY_ARRAY_IN_ARRAY); if (p_aw2ali == NULL) { Py_XDECREF(p_aw2ali); } int *aw2ali = (int *)PyArray_DATA(p_aw2ali); // number of sinogram from the shape of the sino (can be any number especially when using reduced // ring number) int snno = o_sino->shape[0]; //<><><><><><><><><><><><><><><><><><><><><><> HANDLE_ERROR(cudaSetDevice(Cnt.DEVID)); // Run the conversion to GPU sinos remove_gaps(o_sng->vec.data(), o_sino->vec.data(), snno, aw2ali, Cnt); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up Py_DECREF(p_aw2ali); Py_INCREF(Py_None); return Py_None; } void free_capsule(PyObject *capsule) { void *data = PyCapsule_GetPointer(capsule, NULL); free(data); } //==================================================================================================== static PyObject *mmr_span11LUT(PyObject *self, PyObject *args) { // Dictionary of scanner constants PyObject *o_mmrcnst; // Structure of constants Cnst Cnt; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "O", &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Interpret the input objects as... 
*/ PyObject *pd_Naw = PyDict_GetItemString(o_mmrcnst, "Naw"); Cnt.aw = (int)PyLong_AsLong(pd_Naw); PyObject *pd_NSN1 = PyDict_GetItemString(o_mmrcnst, "NSN1"); Cnt.NSN1 = (int)PyLong_AsLong(pd_NSN1); PyObject *pd_NSN11 = PyDict_GetItemString(o_mmrcnst, "NSN11"); Cnt.NSN11 = (int)PyLong_AsLong(pd_NSN11); PyObject *pd_NRNG = PyDict_GetItemString(o_mmrcnst, "NRNG"); Cnt.NRNG = (int)PyLong_AsLong(pd_NRNG); span11LUT span11 = span1_span11(Cnt); npy_intp dims[2]; dims[0] = Cnt.NSN1; PyArrayObject *s1s11_out = (PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT16, span11.li2s11); PyObject *capsule = PyCapsule_New(span11.li2s11, NULL, free_capsule); PyArray_SetBaseObject(s1s11_out, capsule); dims[0] = Cnt.NSN11; PyArrayObject *s1nos_out = (PyArrayObject *)PyArray_SimpleNewFromData(1, dims, NPY_INT8, span11.NSinos); capsule = PyCapsule_New(span11.NSinos, NULL, free_capsule); PyArray_SetBaseObject(s1nos_out, capsule); PyObject *o_out = PyTuple_New(2); PyTuple_SetItem(o_out, 0, PyArray_Return(s1s11_out)); PyTuple_SetItem(o_out, 1, PyArray_Return(s1nos_out)); return o_out; } //==================================================================================================== static PyObject *aux_varon(PyObject *self, PyObject *args) { // M1 (mean) vector PyObject *o_m1; // M2 (variance) vector PyObject *o_m2; // input of instance data X PyObject *o_x; // Dictionary of scanner constants PyObject *o_mmrcnst; // Structure of constants Cnst Cnt; // realisation number int b; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ /* Parse the input tuple */ if (!PyArg_ParseTuple(args, "OOOiO", &o_m1, &o_m2, &o_x, &b, &o_mmrcnst)) return NULL; //^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ PyObject *pd_log = PyDict_GetItemString(o_mmrcnst, "LOG"); Cnt.LOG = (char)PyLong_AsLong(pd_log); PyObject *pd_devid = PyDict_GetItemString(o_mmrcnst, "DEVID"); Cnt.DEVID = (char)PyLong_AsLong(pd_devid); // input sino and the above 2D LUT PyArrayObject *p_m1 = NULL; p_m1 = (PyArrayObject *)PyArray_FROM_OTF(o_m1, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); PyArrayObject *p_m2 = NULL; p_m2 = (PyArrayObject *)PyArray_FROM_OTF(o_m2, NPY_FLOAT32, NPY_ARRAY_INOUT_ARRAY2); PyArrayObject *p_x = NULL; p_x = (PyArrayObject *)PyArray_FROM_OTF(o_x, NPY_FLOAT32, NPY_ARRAY_IN_ARRAY); if (p_m1 == NULL || p_m2 == NULL || p_x == NULL) { PyArray_DiscardWritebackIfCopy(p_m1); PyArray_DiscardWritebackIfCopy(p_m2); Py_XDECREF(p_m1); Py_XDECREF(p_m2); Py_XDECREF(p_x); } float *m1 = (float *)PyArray_DATA(p_m1); float *m2 = (float *)PyArray_DATA(p_m2); float *x = (float *)PyArray_DATA(p_x); int ndim = PyArray_NDIM(p_x); size_t nele = 1; for (int i = 0; i < ndim; i++) { nele *= PyArray_DIM(p_x, i); } printf("i> number of elements in data array: %lu\n", nele); // sets the device on which to calculate HANDLE_ERROR(cudaSetDevice(Cnt.DEVID)); //<><><><><><><><><><><><><><><><><><><><><><> // Update variance online (M1, M2) using data instance X var_online(m1, m2, x, b, nele); //<><><><><><><><><><><><><><><><><><><><><><> // Clean up PyArray_ResolveWritebackIfCopy(p_m1); PyArray_ResolveWritebackIfCopy(p_m2); Py_DECREF(p_m1); Py_DECREF(p_m2); Py_DECREF(p_x); Py_INCREF(Py_None); return Py_None; }
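mmr_pgaps and mmr_rgaps above are thin wrappers: the work happens in put_gaps and remove_gaps (declared elsewhere), which copy between the compact GPU sinogram layout holding only the active bins and the full Siemens layout that includes the gap bins, using the aw2ali look-up table taken from o_txLUT. Those kernels are not part of this file; the scatter/gather sketch below only illustrates the LUT-driven pattern for a single sinogram plane, with hypothetical names and a simplified layout (naw active bins mapping into an A*W plane):

// Illustration only: "put gaps" scatters compact active-bin data into the full
// plane via the LUT, "remove gaps" gathers it back. The real put_gaps/remove_gaps
// additionally loop over all sinogram planes and handle the span-11 axial layout.
__global__ void put_gaps_sketch(float *full, const float *compact, const int *aw2ali, int naw) {
  int aw = blockIdx.x * blockDim.x + threadIdx.x;
  if (aw < naw) full[aw2ali[aw]] = compact[aw];  // gap bins keep their previous value (e.g. zero)
}
__global__ void remove_gaps_sketch(float *compact, const float *full, const int *aw2ali, int naw) {
  int aw = blockIdx.x * blockDim.x + threadIdx.x;
  if (aw < naw) compact[aw] = full[aw2ali[aw]];
}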
BCECriterion.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "common.h" #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> const float eps = 1e-12f; struct bce_functor { template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); return - (t * logf(o + eps) + (1.f - t) * logf(1.f - o + eps)); } }; struct bce_functor_weights { template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); float w = thrust::get<2>(x); return - w * (t * logf(o + eps) + (1.f - t) * logf(1.f - o + eps)); } }; void THNN_CudaBCECriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights) { THCUNN_assertSameGPU(state, 3, input, target, weights); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum; if (weights) { weights = THCudaTensor_newContiguous(state, weights); thrust::device_ptr<float> weights_data(THCudaTensor_data(state, weights)); sum = thrust::transform_reduce( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), bce_functor_weights(), (float) 0.f, thrust::plus<float>() ); THCudaTensor_free(state, weights); } else { sum = thrust::transform_reduce( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), bce_functor(), (float) 0.f, thrust::plus<float>() ); } if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct bce_updateGradInput_functor { const float norm; bce_updateGradInput_functor(float norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); return - (t - o) / ((1 - o + eps) * (o + eps)) * norm; } }; struct bce_updateGradInput_functor_weights { const float norm; bce_updateGradInput_functor_weights(float norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); float w = thrust::get<2>(x); return - (t - o) / ((1 - o + eps) * (o + eps)) * norm * w; } }; void THNN_CudaBCECriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights) { THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights); long size = THCudaTensor_nElement(state, input); float norm = (sizeAverage ? 
1./size : 1.); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); if (weights) { weights = THCudaTensor_newContiguous(state, weights); thrust::device_ptr<float> weights_data(THCudaTensor_data(state, weights)); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), gradInput_data, bce_updateGradInput_functor_weights(norm) ); THCudaTensor_free(state, weights); } else { thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), gradInput_data, bce_updateGradInput_functor(norm) ); } THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
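The forward pass above sums the per-element binary cross-entropy terms with thrust::transform_reduce over a zip iterator of (input, target), optionally zipping in per-element weights. Stripped of the THC tensor bookkeeping (newContiguous, free, set1d), the same unweighted reduction on raw device pointers looks roughly as follows; this is a self-contained sketch, not the library code, and d_input/d_target are assumed to already be contiguous device buffers:

// Stand-alone version of the unweighted BCE forward reduction used above.
#include <thrust/device_ptr.h>
#include <thrust/tuple.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <math.h>

struct bce_term {
  template <class Tuple>
  __host__ __device__ float operator()(Tuple x) const {
    const float eps = 1e-12f;
    float o = thrust::get<0>(x), t = thrust::get<1>(x);
    return -(t * logf(o + eps) + (1.f - t) * logf(1.f - o + eps));
  }
};

float bce_forward(const float *d_input, const float *d_target, long n, bool sizeAverage) {
  thrust::device_ptr<const float> in(d_input), tg(d_target);
  float sum = thrust::transform_reduce(
      thrust::make_zip_iterator(thrust::make_tuple(in, tg)),
      thrust::make_zip_iterator(thrust::make_tuple(in + n, tg + n)),
      bce_term(), 0.f, thrust::plus<float>());
  return sizeAverage ? sum / n : sum;
}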
BCECriterion.cu
#include "THCUNN.h" #include "common.h" #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> const float eps = 1e-12f; struct bce_functor { template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); return - (t * logf(o + eps) + (1.f - t) * logf(1.f - o + eps)); } }; struct bce_functor_weights { template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); float w = thrust::get<2>(x); return - w * (t * logf(o + eps) + (1.f - t) * logf(1.f - o + eps)); } }; void THNN_CudaBCECriterion_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *output, bool sizeAverage, THCudaTensor *weights) { THCUNN_assertSameGPU(state, 3, input, target, weights); long size = THCudaTensor_nElement(state, input); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); float sum; if (weights) { weights = THCudaTensor_newContiguous(state, weights); thrust::device_ptr<float> weights_data(THCudaTensor_data(state, weights)); sum = thrust::transform_reduce( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), bce_functor_weights(), (float) 0.f, thrust::plus<float>() ); THCudaTensor_free(state, weights); } else { sum = thrust::transform_reduce( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), bce_functor(), (float) 0.f, thrust::plus<float>() ); } if (sizeAverage) sum /= size; THCudaTensor_free(state, input); THCudaTensor_free(state, target); THCudaTensor_set1d(state, output, 0, sum); } struct bce_updateGradInput_functor { const float norm; bce_updateGradInput_functor(float norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); return - (t - o) / ((1 - o + eps) * (o + eps)) * norm; } }; struct bce_updateGradInput_functor_weights { const float norm; bce_updateGradInput_functor_weights(float norm_) : norm(norm_) {} template <class Tuple> __host__ __device__ float operator()(Tuple x) { float o = thrust::get<0>(x); float t = thrust::get<1>(x); float w = thrust::get<2>(x); return - (t - o) / ((1 - o + eps) * (o + eps)) * norm * w; } }; void THNN_CudaBCECriterion_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *target, THCudaTensor *gradInput, bool sizeAverage, THCudaTensor *weights) { THCUNN_assertSameGPU(state, 4, input, target, gradInput, weights); long size = THCudaTensor_nElement(state, input); float norm = (sizeAverage ? 
1./size : 1.); input = THCudaTensor_newContiguous(state, input); target = THCudaTensor_newContiguous(state, target); THCudaTensor_resizeAs(state, gradInput, input); thrust::device_ptr<float> input_data(THCudaTensor_data(state, input)); thrust::device_ptr<float> target_data(THCudaTensor_data(state, target)); thrust::device_ptr<float> gradInput_data(THCudaTensor_data(state, gradInput)); if (weights) { weights = THCudaTensor_newContiguous(state, weights); thrust::device_ptr<float> weights_data(THCudaTensor_data(state, weights)); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data, weights_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size, weights_data+size)), gradInput_data, bce_updateGradInput_functor_weights(norm) ); THCudaTensor_free(state, weights); } else { thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(input_data, target_data)), thrust::make_zip_iterator(thrust::make_tuple(input_data+size, target_data+size)), gradInput_data, bce_updateGradInput_functor(norm) ); } THCudaTensor_free(state, input); THCudaTensor_free(state, target); }
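For reference, the backward functors above implement the analytic derivative of the per-element loss. Ignoring the eps terms, which only guard against division by zero, l(o) = -(t*log(o) + (1-t)*log(1-o)) gives dl/do = -t/o + (1-t)/(1-o) = (o - t) / (o*(1-o)) = -(t - o) / (o*(1-o)); multiplying by norm (1/size when sizeAverage is true, 1 otherwise) and, in the weighted variant, by the per-element weight w yields exactly the -(t - o) / ((1 - o + eps) * (o + eps)) * norm expression used in bce_updateGradInput_functor.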
b3217deb148d316fbb9d6d51a3d6b0e0c069c4bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Ingemars rewrite of the julia demo, integrating the OpenGL parts. // The CUDA parts are - intentionally - NOT rewritten, and have some // serious performance problems. Find the problems and make this a // decently performing CUDA program. // Compile with // nvcc -lglut -lGL interactiveJulia.cu -o interactiveJulia #include <GL/glut.h> #include <GL/gl.h> #include <stdio.h> // Image data unsigned char *pixels; int gImageWidth, gImageHeight; unsigned char *dev_bitmap; // Init image data void initBitmap(int width, int height) { pixels = (unsigned char *)malloc(width * height * 4); gImageWidth = width; gImageHeight = height; } #define DIM 1024 // Complex number class struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } }; __device__ int julia( int x, int y, float r, float im) { const float scale = 1.5; float jx = scale * (float)(DIM/2 - x)/(DIM/2); float jy = scale * (float)(DIM/2 - y)/(DIM/2); // hipComplex c(-0.8, 0.156); hipComplex c(r, im); hipComplex a(jx, jy); int i = 0; for (i=0; i<200; i++) { a = a * a + c; if (a.magnitude2() > 1000) return i; } return i; } __global__ void kernel( unsigned char *ptr, float r, float im) { // map from blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = y * DIM + x; /* int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; */ // now calculate the value at that position int juliaValue = julia( x, y, r, im ); ptr[offset*4 + 0] = 255 * juliaValue/200; ptr[offset*4 + 1] = 0; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 255; } float theReal, theImag; // Compute CUDA kernel and display image void Draw() { dim3 grid(64,64); //TIMERS hipEvent_t myEvent, myEvent2; hipEventCreate(&myEvent); hipEventCreate(&myEvent2); hipEventRecord(myEvent, 0); hipEventSynchronize(myEvent); //GPU CALL unsigned char *dev_bitmap; dim3 dimBlock( DIM/64, DIM/64); hipLaunchKernelGGL(( kernel), dim3(grid),dim3(dimBlock), 0, 0, dev_bitmap, theReal, theImag); hipDeviceSynchronize(); //TIMERS hipEventRecord(myEvent2, 0); hipEventSynchronize(myEvent2); float theTime; hipEventElapsedTime(&theTime, myEvent, myEvent2); hipEventDestroy(myEvent); hipEventDestroy(myEvent2); printf("The gpu calculation took: %0.2f ms\n", theTime); hipMemcpy( pixels, dev_bitmap, gImageWidth*gImageHeight*4, hipMemcpyDeviceToHost ); // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels ); glutSwapBuffers(); } void MouseMovedProc(int x, int y) { theReal = -0.5 + (float)(x-400) / 500.0; theImag = -0.5 + (float)(y-400) / 500.0; printf("real = %f, imag = %f\n", theReal, theImag); glutPostRedisplay (); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA ); glutInitWindowSize( DIM, DIM ); glutCreateWindow("CUDA on live GL"); glutDisplayFunc(Draw); glutPassiveMotionFunc(MouseMovedProc); initBitmap(DIM, DIM); hipMalloc( &dev_bitmap, gImageWidth*gImageHeight*4 ); glutMainLoop(); hipFree( dev_bitmap ); }
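One of the problems the header comment asks the reader to find is visible directly in Draw() above: it declares a local unsigned char *dev_bitmap; that shadows the global pointer allocated once in main(), so both the kernel launch and the following memcpy go through an uninitialized pointer. A minimal corrected Draw() is sketched below in plain CUDA launch syntax (the hipified build would use hipLaunchKernelGGL and the hip* calls); the timing code is omitted for brevity and the grid/block split gives the same 1024x1024 pixel coverage as the original:

// Sketch: drop the shadowing local declaration and reuse the buffer that
// main() allocates once. cudaMemcpy on the default stream waits for the
// kernel to finish, so no explicit synchronisation is needed here.
void Draw() {
  dim3 grid(DIM / 16, DIM / 16);  // 64 x 64 blocks
  dim3 block(16, 16);             // 16 x 16 threads per block, covers DIM x DIM pixels
  kernel<<<grid, block>>>(dev_bitmap, theReal, theImag);  // global dev_bitmap
  cudaMemcpy(pixels, dev_bitmap, gImageWidth * gImageHeight * 4, cudaMemcpyDeviceToHost);
  glClearColor(0.0, 0.0, 0.0, 1.0);
  glClear(GL_COLOR_BUFFER_BIT);
  glDrawPixels(gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
  glutSwapBuffers();
}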
b3217deb148d316fbb9d6d51a3d6b0e0c069c4bc.cu
// Ingemars rewrite of the julia demo, integrating the OpenGL parts. // The CUDA parts are - intentionally - NOT rewritten, and have some // serious performance problems. Find the problems and make this a // decently performing CUDA program. // Compile with // nvcc -lglut -lGL interactiveJulia.cu -o interactiveJulia #include <GL/glut.h> #include <GL/gl.h> #include <stdio.h> // Image data unsigned char *pixels; int gImageWidth, gImageHeight; unsigned char *dev_bitmap; // Init image data void initBitmap(int width, int height) { pixels = (unsigned char *)malloc(width * height * 4); gImageWidth = width; gImageHeight = height; } #define DIM 1024 // Complex number class struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } }; __device__ int julia( int x, int y, float r, float im) { const float scale = 1.5; float jx = scale * (float)(DIM/2 - x)/(DIM/2); float jy = scale * (float)(DIM/2 - y)/(DIM/2); // cuComplex c(-0.8, 0.156); cuComplex c(r, im); cuComplex a(jx, jy); int i = 0; for (i=0; i<200; i++) { a = a * a + c; if (a.magnitude2() > 1000) return i; } return i; } __global__ void kernel( unsigned char *ptr, float r, float im) { // map from blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int offset = y * DIM + x; /* int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; */ // now calculate the value at that position int juliaValue = julia( x, y, r, im ); ptr[offset*4 + 0] = 255 * juliaValue/200; ptr[offset*4 + 1] = 0; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 255; } float theReal, theImag; // Compute CUDA kernel and display image void Draw() { dim3 grid(64,64); //TIMERS cudaEvent_t myEvent, myEvent2; cudaEventCreate(&myEvent); cudaEventCreate(&myEvent2); cudaEventRecord(myEvent, 0); cudaEventSynchronize(myEvent); //GPU CALL unsigned char *dev_bitmap; dim3 dimBlock( DIM/64, DIM/64); kernel<<<grid,dimBlock>>>( dev_bitmap, theReal, theImag); cudaThreadSynchronize(); //TIMERS cudaEventRecord(myEvent2, 0); cudaEventSynchronize(myEvent2); float theTime; cudaEventElapsedTime(&theTime, myEvent, myEvent2); cudaEventDestroy(myEvent); cudaEventDestroy(myEvent2); printf("The gpu calculation took: %0.2f ms\n", theTime); cudaMemcpy( pixels, dev_bitmap, gImageWidth*gImageHeight*4, cudaMemcpyDeviceToHost ); // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); glDrawPixels( gImageWidth, gImageHeight, GL_RGBA, GL_UNSIGNED_BYTE, pixels ); glutSwapBuffers(); } void MouseMovedProc(int x, int y) { theReal = -0.5 + (float)(x-400) / 500.0; theImag = -0.5 + (float)(y-400) / 500.0; printf("real = %f, imag = %f\n", theReal, theImag); glutPostRedisplay (); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA ); glutInitWindowSize( DIM, DIM ); glutCreateWindow("CUDA on live GL"); glutDisplayFunc(Draw); glutPassiveMotionFunc(MouseMovedProc); initBitmap(DIM, DIM); cudaMalloc( &dev_bitmap, gImageWidth*gImageHeight*4 ); glutMainLoop(); cudaFree( dev_bitmap ); }
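A second per-frame cost is that every call to Draw() creates two events, synchronizes on the start event before the launch, and destroys both events afterwards; none of that belongs in the per-frame path. A common pattern, sketched here under the assumption that the globals declared in this file (kernel, dev_bitmap, DIM) are in scope, is to create the events once, record them around the kernel, and synchronize only on the stop event:

// Sketch: reuse timing events across frames instead of re-creating them.
static cudaEvent_t evStart, evStop;
static bool evReady = false;

static float timedJuliaFrame(float r, float im) {
  if (!evReady) { cudaEventCreate(&evStart); cudaEventCreate(&evStop); evReady = true; }
  dim3 grid(DIM / 16, DIM / 16), block(16, 16);
  cudaEventRecord(evStart, 0);
  kernel<<<grid, block>>>(dev_bitmap, r, im);
  cudaEventRecord(evStop, 0);
  cudaEventSynchronize(evStop);   // one wait, after the stop event
  float ms = 0.f;
  cudaEventElapsedTime(&ms, evStart, evStop);
  return ms;                      // kernel time in milliseconds
}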
e523354147b820bc96cd4a87c35cad4ab6ccfb57.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2011 @author Azzam Haidar @author Tingxing Dong @precisions normal z -> s d c */ #include "common_magma.h" #include "magmablas.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_z #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column ////////////////////////////////////////////////////////////////////////////////////////// extern __shared__ magmaDoubleComplex shared_data[]; extern __shared__ double sdata[]; extern __shared__ int int_sdata[]; /* routines in this file are used by zgetf2_batched.cu */ ////////////////////////////////////////////////////////////////////////////////////////// __device__ int izamax_devfunc(int length, const magmaDoubleComplex *x, int incx, double *shared_x, int *shared_idx) { int tx = threadIdx.x; magmaDoubleComplex res; double res1; int nchunk = (length-1)/zamax + 1; if( tx < zamax ){ shared_x[tx] = 0.0; shared_idx[tx] = tx;//-1;// -1 will crash the code in case matrix is singular, better is to put =tx and make check info at output } __syncthreads(); for(int s =0 ; s < nchunk; s++) { if( (tx + s * zamax < length) && (tx < zamax) ) { res = x[(tx + s * zamax) * incx]; res1 = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res)); if( res1 > shared_x[tx] ) { shared_x[tx] = res1; shared_idx[tx] = tx + s * zamax; } } __syncthreads(); } if(length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax magma_getidmax<zamax>(tx, shared_x, shared_idx); else magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx); return shared_idx[0]; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void izamax_kernel_batched(int length, int chunk, magmaDoubleComplex **x_array, int incx, int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep) { magmaDoubleComplex *x_start = x_array[blockIdx.z]; const magmaDoubleComplex *x = &(x_start[step + step * lda]); magma_int_t *ipiv = ipiv_array[blockIdx.z]; int tx = threadIdx.x; double *shared_x = sdata; int *shared_idx = (int*)(shared_x + zamax); izamax_devfunc(length, x, incx, shared_x, shared_idx); if(tx == 0){ ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing if(shared_x[0] == MAGMA_D_ZERO){ info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void tree_izamax_kernel_batched(int length, magmaDoubleComplex **x_array, int incx, int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array) { magmaDoubleComplex *x_start = x_array[blockIdx.z]; const magmaDoubleComplex *x = &(x_start[step + step * lda]); double *data_pool = data_pool_array[blockIdx.z]; magma_int_t *id_pool = id_pool_array[blockIdx.z]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; int tx = threadIdx.x; int local_max_id; __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; x += zamax * blockIdx.x * incx; izamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx); if(tx ==0) { local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset if(gridDim.x == 1) { ipiv[step] = local_max_id + step + 1; // Fortran 
Indexing if(shared_x[0] == MAGMA_D_ZERO) info_array[blockIdx.z] = local_max_id + step + gbstep + 1; } else { // put each thread block local max and its index in workspace data_pool[blockIdx.x] = shared_x[0]; id_pool[blockIdx.x] = local_max_id; } } } __global__ void tree_izamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array) { __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; double *data_pool = data_pool_array[blockIdx.z]; magma_int_t *id_pool = id_pool_array[blockIdx.z]; int tx = threadIdx.x; //read data if( tx < n) { shared_x[tx] = data_pool[tx]; shared_idx[tx] = id_pool[tx]; } else { shared_x[tx] = 0.0; shared_idx[tx] = -2; } __syncthreads(); // compute local result inside each thread block magma_getidmax<zamax>(tx, shared_x, shared_idx); if(tx == 0 ) { ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing if(shared_x[0] == MAGMA_D_ZERO) info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1; } } magma_int_t magma_izamax_lg_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda, magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { if(length == 1) return 0; if(incx < 0) return 1; double* data_pool; magma_int_t* id_pool; double** data_pool_array = NULL; magma_int_t** id_pool_array = NULL; magma_int_t num_blocks = (length-1)/(zamax) + 1; // creat pools(data and index) to store the result of each thread blocks magma_dmalloc(&data_pool, num_blocks * batchCount); magma_imalloc(&id_pool, num_blocks * batchCount); magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array)); magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array)); #if defined(PRECISION_z) || defined(PRECISION_d) dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue); #else sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue); #endif set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue); if( num_blocks > zamax) { printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n", length, num_blocks, zamax); } else { // first level tree reduction dim3 grid(num_blocks, 1, batchCount); hipLaunchKernelGGL(( tree_izamax_kernel_batched), dim3(grid), dim3(zamax), 0, queue, length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array); if( num_blocks > 1) { // second level tree reduction dim3 grid2(1, 1, batchCount); hipLaunchKernelGGL(( tree_izamax_kernel2_batched), dim3(grid2), dim3(zamax), 0, queue, num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array); } } magma_free(data_pool); magma_free(id_pool); magma_free(data_pool_array); magma_free(id_pool_array); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_izamax_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda, magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { if(length == 0 ) return 0; #if 1 dim3 grid(1, 1, batchCount); int chunk = (length-1)/zamax + 1; hipLaunchKernelGGL(( izamax_kernel_batched), dim3(grid), dim3(zamax), zamax * 
(sizeof(double) + sizeof(int)), queue , length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep); #else // the magma_izamax_lg_batched is faster but when cuda launch it as 2 kernels the white space time between these 2 kernels and the next kernel is larger than using the izamax_kernel for that today we are using only izamax_kernel if( length <= 10 * zamax ) { dim3 grid(1, 1, batchCount); int chunk = (length-1)/zamax + 1; hipLaunchKernelGGL(( izamax_kernel_batched), dim3(grid), dim3(zamax), zamax * (sizeof(double) + sizeof(magma_int_t)), queue , length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep); } else { magma_izamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount); } #endif return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zswap_kernel_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array) { magmaDoubleComplex *x = x_array[blockIdx.z]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; __shared__ int jp; if(threadIdx.x == 0) { jp = ipiv[step] - 1; //if(blockIdx.z == 1) printf("jp=%d", jp); } __syncthreads(); if(jp == step) return; // no pivot int id = threadIdx.x; if (id < n) { magmaDoubleComplex tmp = x[jp + incx*id]; x[jp + incx*id] = x[step + incx*id]; x[step + incx*id] = tmp; } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zswap_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array, magma_int_t batchCount, magma_queue_t queue) { /* zswap two row: (ipiv[step]-1)th and jth */ if( n > MAX_NTHREADS) { printf("magma_zswap_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS); return -15; } dim3 grid(1,1, batchCount); hipLaunchKernelGGL(( zswap_kernel_batched), dim3(grid), dim3(n), 0, queue , n, x_array, incx, step, ipiv_array); return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zscal_zgeru_kernel_batched(int m, int n, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t *info_array, int gbstep) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A = &(A_start[step + step * lda]); magmaDoubleComplex *shared_y = shared_data; int tx = threadIdx.x; int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x; if (tx < n) { shared_y[tx] = A[lda * tx]; } __syncthreads(); if(shared_y[0] == MAGMA_Z_ZERO) { info_array[blockIdx.z] = step + gbstep + 1; return; } if (gbidx < m && gbidx > 0) { magmaDoubleComplex reg = MAGMA_Z_ZERO; reg = A[gbidx]; reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]); A[gbidx] = reg; #pragma unroll for(int i=1; i < n; i++) { //A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg;//cuda give wrong results with this one //A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one A[gbidx + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zscal_zgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { /* Specialized kernel which merged 
zscal and zgeru the two kernels 1) zscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ if( n == 0) return 0; if( n > MAX_NTHREADS) { printf("magma_zscal_zgeru_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS); return -15; } int nchunk = (m-1)/MAX_NTHREADS + 1; size_t shared_size = sizeof(magmaDoubleComplex)*(n); dim3 grid(nchunk, 1, batchCount); hipLaunchKernelGGL(( zscal_zgeru_kernel_batched), dim3(grid), dim3(min(m, MAX_NTHREADS)), shared_size, queue, m, n, step, dA_array, lda, info_array, gbstep); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zgetf2trsm_kernel_batched(int ib, int n, magmaDoubleComplex **dA_array, int step, int lda) { /* this kernel does the safe nonblocked TRSM operation B = A^-1 * B */ magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A = &(A_start[step + step * lda]); magmaDoubleComplex *B = &(A_start[step + (step+ib) * lda]); magmaDoubleComplex *shared_a = shared_data; magmaDoubleComplex *shared_b = shared_data+ib*ib; int tid = threadIdx.x; int i,d; // Read A and B at the same time to the shared memory (shared_a shared_b) // note that shared_b = shared_a+ib*ib so its contiguous // I can make it in one loop reading if ( tid < ib) { #pragma unroll for( i=0; i < n+ib; i++) { shared_a[tid + i*ib] = A[tid + i*lda]; } } __syncthreads(); if (tid < n) { #pragma unroll for( d=0; d<ib-1; d++) { for( i=d+1; i<ib; i++) { shared_b[i+tid*ib] += (MAGMA_Z_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib]; } } } __syncthreads(); // write back B if ( tid < ib) { #pragma unroll for( i=0; i < n; i++) { B[tid + i*lda] = shared_b[tid + i*ib]; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" void magma_zgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t step, magma_int_t lda, magma_int_t batchCount, magma_queue_t queue) { /* */ if( n == 0 || ib == 0 ) return; size_t shared_size = sizeof(magmaDoubleComplex)*(ib*(ib+n)); // TODO TODO TODO if( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra { printf("kernel_zgetf2trsm error out of shared memory \n"); return; } dim3 grid(1, 1, batchCount); hipLaunchKernelGGL(( zgetf2trsm_kernel_batched), dim3(grid), dim3(max(n,ib)), shared_size, queue, ib, n, dA_array, step, lda); } //////////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zupdate_device(int m, int step, magmaDoubleComplex* x, int ldx, magmaDoubleComplex *A, int lda) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; int indx; //magmaDoubleComplex reg = MAGMA_Z_ZERO; // update the current column by all the previous one #pragma unroll for(int i=0; i < step; i++) { for(int s=0 ; s < nchunk; s++) { indx = tid + s * MAX_NTHREADS; if ( indx > i && indx < m ) { A[indx] -= A[i] * x[indx + i*ldx]; //printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i); } } __syncthreads(); } //printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg); } //////////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zscal5_device(int m, magmaDoubleComplex* x, 
magmaDoubleComplex alpha) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) { #if 0 x[tid + s * MAX_NTHREADS] *= MAGMA_Z_DIV(MAGMA_Z_ONE, alpha); #else x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha; #endif } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep) { int gboff = paneloffset+step; magma_int_t *ipiv = ipiv_array[blockIdx.z]; magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]); magmaDoubleComplex *A00 = &(A_start[paneloffset + paneloffset * lda]); magmaDoubleComplex *shared_A = shared_data; __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; __shared__ magmaDoubleComplex alpha; int tid = threadIdx.x; // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int nchunk = (m-1)/MAX_NTHREADS + 1; // read the current column from dev to shared memory for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS]; } __syncthreads(); // update this column if( step > 0 ){ zupdate_device( m, step, A00, lda, shared_A, 1); __syncthreads(); } // if( tid < (m-step) ) // DO NO TPUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE. // So let all htreads call this routine it will handle correctly based on the size // note that izamax need only 128 threads, s izamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx); if(tid == 0){ ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing alpha = shared_A[shared_idx[0]+step]; //printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha); if(shared_x[0] == MAGMA_D_ZERO){ info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1; } } __syncthreads(); if(shared_x[0] == MAGMA_D_ZERO) return; __syncthreads(); // DO NO PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE. 
zscal5_device( m-step, shared_A+step, alpha); // put back the pivot that has been scaled with itself menaing =1 if(tid == 0) shared_A[shared_idx[0] + step] = alpha; __syncthreads(); // write back from shared to dev memory for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) { A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS]; //printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]); } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zcomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t **ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { /* Specialized kernel which merged zscal and zgeru the two kernels 1) zscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ if( m == 0) return 0; size_t all_shmem_size = zamax*(sizeof(double)+sizeof(int)) + (m+2)*sizeof(magmaDoubleComplex); if( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra { printf("magma_zcomputecolumn_batched error out of shared memory \n"); return -20; } size_t shared_size = sizeof(magmaDoubleComplex)*m; dim3 grid(1, 1, batchCount); hipLaunchKernelGGL(( zcomputecolumn_kernel_shared_batched), dim3(grid), dim3(min(m, MAX_NTHREADS)), shared_size, queue, m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep); return 0; } ////////////////////////////////////////////////////////////////////////////////////////////////////
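The zscal_zgeru kernel above fuses the two classical steps of one unblocked LU column update: scale the pivot column A(step+1:M-1, step) by 1/A(step, step), then apply the rank-1 update A(step+1:M-1, step+1:N-1) -= column * row. The sketch below shows that structure for a single real-valued matrix with the pivot already at (0,0); it is an illustration with made-up names, not the MAGMA code, which additionally stages the pivot row in shared memory, runs one matrix per blockIdx.z and skips batch entries flagged as singular in info_array:

// Illustration of a fused "scale column + rank-1 update" step (column-major,
// A(i,j) = A[i + j*lda], pivot at A(0,0), no pivot search, no info checks).
// Launch with enough threads to cover rows 1..m-1, e.g. <<<(m+255)/256, 256>>>.
__global__ void scal_ger_sketch(int m, int n, float *A, int lda) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;  // row index
  if (i == 0 || i >= m) return;
  float li0 = A[i] / A[0];                 // L(i,0) = A(i,0) / pivot
  A[i] = li0;
  for (int j = 1; j < n; ++j)
    A[i + j * lda] -= li0 * A[j * lda];    // A(i,j) -= L(i,0) * A(0,j)
}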
e523354147b820bc96cd4a87c35cad4ab6ccfb57.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2011 @author Azzam Haidar @author Tingxing Dong @precisions normal z -> s d c */ #include "common_magma.h" #include "magmablas.h" #include "batched_kernel_param.h" #include "magma_templates.h" #define PRECISION_z #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column ////////////////////////////////////////////////////////////////////////////////////////// extern __shared__ magmaDoubleComplex shared_data[]; extern __shared__ double sdata[]; extern __shared__ int int_sdata[]; /* routines in this file are used by zgetf2_batched.cu */ ////////////////////////////////////////////////////////////////////////////////////////// __device__ int izamax_devfunc(int length, const magmaDoubleComplex *x, int incx, double *shared_x, int *shared_idx) { int tx = threadIdx.x; magmaDoubleComplex res; double res1; int nchunk = (length-1)/zamax + 1; if( tx < zamax ){ shared_x[tx] = 0.0; shared_idx[tx] = tx;//-1;// -1 will crash the code in case matrix is singular, better is to put =tx and make check info at output } __syncthreads(); for(int s =0 ; s < nchunk; s++) { if( (tx + s * zamax < length) && (tx < zamax) ) { res = x[(tx + s * zamax) * incx]; res1 = fabs(MAGMA_Z_REAL(res)) + fabs(MAGMA_Z_IMAG(res)); if( res1 > shared_x[tx] ) { shared_x[tx] = res1; shared_idx[tx] = tx + s * zamax; } } __syncthreads(); } if(length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax magma_getidmax<zamax>(tx, shared_x, shared_idx); else magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx); return shared_idx[0]; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void izamax_kernel_batched(int length, int chunk, magmaDoubleComplex **x_array, int incx, int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep) { magmaDoubleComplex *x_start = x_array[blockIdx.z]; const magmaDoubleComplex *x = &(x_start[step + step * lda]); magma_int_t *ipiv = ipiv_array[blockIdx.z]; int tx = threadIdx.x; double *shared_x = sdata; int *shared_idx = (int*)(shared_x + zamax); izamax_devfunc(length, x, incx, shared_x, shared_idx); if(tx == 0){ ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing if(shared_x[0] == MAGMA_D_ZERO){ info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void tree_izamax_kernel_batched(int length, magmaDoubleComplex **x_array, int incx, int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array) { magmaDoubleComplex *x_start = x_array[blockIdx.z]; const magmaDoubleComplex *x = &(x_start[step + step * lda]); double *data_pool = data_pool_array[blockIdx.z]; magma_int_t *id_pool = id_pool_array[blockIdx.z]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; int tx = threadIdx.x; int local_max_id; __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; x += zamax * blockIdx.x * incx; izamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx); if(tx ==0) { local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset if(gridDim.x == 1) { ipiv[step] = local_max_id + step + 1; // Fortran Indexing if(shared_x[0] == MAGMA_D_ZERO) info_array[blockIdx.z] = local_max_id + step + 
gbstep + 1; } else { // put each thread block local max and its index in workspace data_pool[blockIdx.x] = shared_x[0]; id_pool[blockIdx.x] = local_max_id; } } } __global__ void tree_izamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, double** data_pool_array, magma_int_t** id_pool_array) { __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; double *data_pool = data_pool_array[blockIdx.z]; magma_int_t *id_pool = id_pool_array[blockIdx.z]; int tx = threadIdx.x; //read data if( tx < n) { shared_x[tx] = data_pool[tx]; shared_idx[tx] = id_pool[tx]; } else { shared_x[tx] = 0.0; shared_idx[tx] = -2; } __syncthreads(); // compute local result inside each thread block magma_getidmax<zamax>(tx, shared_x, shared_idx); if(tx == 0 ) { ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing if(shared_x[0] == MAGMA_D_ZERO) info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1; } } magma_int_t magma_izamax_lg_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda, magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { if(length == 1) return 0; if(incx < 0) return 1; double* data_pool; magma_int_t* id_pool; double** data_pool_array = NULL; magma_int_t** id_pool_array = NULL; magma_int_t num_blocks = (length-1)/(zamax) + 1; // creat pools(data and index) to store the result of each thread blocks magma_dmalloc(&data_pool, num_blocks * batchCount); magma_imalloc(&id_pool, num_blocks * batchCount); magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array)); magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array)); #if defined(PRECISION_z) || defined(PRECISION_d) dset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue); #else sset_pointer(data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue); #endif set_ipointer(id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue); if( num_blocks > zamax) { printf("length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n", length, num_blocks, zamax); } else { // first level tree reduction dim3 grid(num_blocks, 1, batchCount); tree_izamax_kernel_batched<<<grid, zamax, 0, queue>>>(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array); if( num_blocks > 1) { // second level tree reduction dim3 grid2(1, 1, batchCount); tree_izamax_kernel2_batched<<<grid2, zamax, 0, queue>>>(num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array); } } magma_free(data_pool); magma_free(id_pool); magma_free(data_pool_array); magma_free(id_pool_array); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_izamax_batched(magma_int_t length, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda, magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { if(length == 0 ) return 0; #if 1 dim3 grid(1, 1, batchCount); int chunk = (length-1)/zamax + 1; izamax_kernel_batched<<< grid, zamax, zamax * (sizeof(double) + sizeof(int)), queue >>> (length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep); #else // the magma_izamax_lg_batched is faster but when cuda launch it as 2 
kernels the white space time between these 2 kernels and the next kernel is larger than using the izamax_kernel for that today we are using only izamax_kernel if( length <= 10 * zamax ) { dim3 grid(1, 1, batchCount); int chunk = (length-1)/zamax + 1; izamax_kernel_batched<<< grid, zamax, zamax * (sizeof(double) + sizeof(magma_int_t)), queue >>> (length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep); } else { magma_izamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount); } #endif return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zswap_kernel_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array) { magmaDoubleComplex *x = x_array[blockIdx.z]; magma_int_t *ipiv = ipiv_array[blockIdx.z]; __shared__ int jp; if(threadIdx.x == 0) { jp = ipiv[step] - 1; //if(blockIdx.z == 1) printf("jp=%d", jp); } __syncthreads(); if(jp == step) return; // no pivot int id = threadIdx.x; if (id < n) { magmaDoubleComplex tmp = x[jp + incx*id]; x[jp + incx*id] = x[step + incx*id]; x[step + incx*id] = tmp; } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zswap_batched(magma_int_t n, magmaDoubleComplex **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array, magma_int_t batchCount, magma_queue_t queue) { /* zswap two row: (ipiv[step]-1)th and jth */ if( n > MAX_NTHREADS) { printf("magma_zswap_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS); return -15; } dim3 grid(1,1, batchCount); zswap_kernel_batched<<< grid, n, 0, queue >>>(n, x_array, incx, step, ipiv_array); return 0; } ///////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zscal_zgeru_kernel_batched(int m, int n, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t *info_array, int gbstep) { // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A = &(A_start[step + step * lda]); magmaDoubleComplex *shared_y = shared_data; int tx = threadIdx.x; int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x; if (tx < n) { shared_y[tx] = A[lda * tx]; } __syncthreads(); if(shared_y[0] == MAGMA_Z_ZERO) { info_array[blockIdx.z] = step + gbstep + 1; return; } if (gbidx < m && gbidx > 0) { magmaDoubleComplex reg = MAGMA_Z_ZERO; reg = A[gbidx]; reg *= MAGMA_Z_DIV(MAGMA_Z_ONE, shared_y[0]); A[gbidx] = reg; #pragma unroll for(int i=1; i < n; i++) { //A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg;//cuda give wrong results with this one //A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one A[gbidx + i*lda] += (MAGMA_Z_NEG_ONE) * shared_y[i] * reg; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zscal_zgeru_batched(magma_int_t m, magma_int_t n, magma_int_t step, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { /* Specialized kernel which merged zscal and zgeru the two kernels 1) zscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ if( n == 0) 
return 0; if( n > MAX_NTHREADS) { printf("magma_zscal_zgeru_batched nb=%d, > %d, not supported \n",n, MAX_NTHREADS); return -15; } int nchunk = (m-1)/MAX_NTHREADS + 1; size_t shared_size = sizeof(magmaDoubleComplex)*(n); dim3 grid(nchunk, 1, batchCount); zscal_zgeru_kernel_batched<<< grid, min(m, MAX_NTHREADS), shared_size, queue>>>(m, n, step, dA_array, lda, info_array, gbstep); return 0; } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zgetf2trsm_kernel_batched(int ib, int n, magmaDoubleComplex **dA_array, int step, int lda) { /* this kernel does the safe nonblocked TRSM operation B = A^-1 * B */ magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A = &(A_start[step + step * lda]); magmaDoubleComplex *B = &(A_start[step + (step+ib) * lda]); magmaDoubleComplex *shared_a = shared_data; magmaDoubleComplex *shared_b = shared_data+ib*ib; int tid = threadIdx.x; int i,d; // Read A and B at the same time to the shared memory (shared_a shared_b) // note that shared_b = shared_a+ib*ib so its contiguous // I can make it in one loop reading if ( tid < ib) { #pragma unroll for( i=0; i < n+ib; i++) { shared_a[tid + i*ib] = A[tid + i*lda]; } } __syncthreads(); if (tid < n) { #pragma unroll for( d=0; d<ib-1; d++) { for( i=d+1; i<ib; i++) { shared_b[i+tid*ib] += (MAGMA_Z_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib]; } } } __syncthreads(); // write back B if ( tid < ib) { #pragma unroll for( i=0; i < n; i++) { B[tid + i*lda] = shared_b[tid + i*ib]; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" void magma_zgetf2trsm_batched(magma_int_t ib, magma_int_t n, magmaDoubleComplex **dA_array, magma_int_t step, magma_int_t lda, magma_int_t batchCount, magma_queue_t queue) { /* */ if( n == 0 || ib == 0 ) return; size_t shared_size = sizeof(magmaDoubleComplex)*(ib*(ib+n)); // TODO TODO TODO if( shared_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 46K leaving 2K for extra { printf("kernel_zgetf2trsm error out of shared memory \n"); return; } dim3 grid(1, 1, batchCount); zgetf2trsm_kernel_batched<<< grid, max(n,ib), shared_size, queue>>>(ib, n, dA_array, step, lda); } //////////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zupdate_device(int m, int step, magmaDoubleComplex* x, int ldx, magmaDoubleComplex *A, int lda) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; int indx; //magmaDoubleComplex reg = MAGMA_Z_ZERO; // update the current column by all the previous one #pragma unroll for(int i=0; i < step; i++) { for(int s=0 ; s < nchunk; s++) { indx = tid + s * MAX_NTHREADS; if ( indx > i && indx < m ) { A[indx] -= A[i] * x[indx + i*ldx]; //printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d \n", step, tid, x[tid + i*ldx], A[i], A[tid],i); } } __syncthreads(); } //printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg); } //////////////////////////////////////////////////////////////////////////////////////////////////// static __device__ void zscal5_device(int m, magmaDoubleComplex* x, magmaDoubleComplex alpha) { int tid = threadIdx.x; int nchunk = (m-1)/MAX_NTHREADS + 1; for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) { #if 0 x[tid + s * MAX_NTHREADS] *= MAGMA_Z_DIV(MAGMA_Z_ONE, alpha); #else x[tid + s * MAX_NTHREADS] = x[tid + s * MAX_NTHREADS]/alpha; #endif } } 
__syncthreads(); } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, magmaDoubleComplex **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep) { int gboff = paneloffset+step; magma_int_t *ipiv = ipiv_array[blockIdx.z]; magmaDoubleComplex *A_start = dA_array[blockIdx.z]; magmaDoubleComplex *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]); magmaDoubleComplex *A00 = &(A_start[paneloffset + paneloffset * lda]); magmaDoubleComplex *shared_A = shared_data; __shared__ double shared_x[zamax]; __shared__ int shared_idx[zamax]; __shared__ magmaDoubleComplex alpha; int tid = threadIdx.x; // checkinfo to avoid computation of the singular matrix if(info_array[blockIdx.z] != 0 ) return; int nchunk = (m-1)/MAX_NTHREADS + 1; // read the current column from dev to shared memory for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS]; } __syncthreads(); // update this column if( step > 0 ){ zupdate_device( m, step, A00, lda, shared_A, 1); __syncthreads(); } // if( tid < (m-step) ) // DO NO TPUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE. // So let all htreads call this routine it will handle correctly based on the size // note that izamax need only 128 threads, s izamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx); if(tid == 0){ ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing alpha = shared_A[shared_idx[0]+step]; //printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f \n",step,ipiv[gboff],gboff,shared_idx[0],alpha); if(shared_x[0] == MAGMA_D_ZERO){ info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1; } } __syncthreads(); if(shared_x[0] == MAGMA_D_ZERO) return; __syncthreads(); // DO NO PUT THE IF CONDITION HERE SINCE izamax_devfunc HAS __syncthreads INSIDE. 
zscal5_device( m-step, shared_A+step, alpha); // put back the pivot that has been scaled with itself menaing =1 if(tid == 0) shared_A[shared_idx[0] + step] = alpha; __syncthreads(); // write back from shared to dev memory for(int s=0 ; s < nchunk; s++) { if( (tid + s * MAX_NTHREADS) < m ) { A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS]; //printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]); } } __syncthreads(); } //////////////////////////////////////////////////////////////////////////////////////////////////// extern "C" magma_int_t magma_zcomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step, magmaDoubleComplex **dA_array, magma_int_t lda, magma_int_t **ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue) { /* Specialized kernel which merged zscal and zgeru the two kernels 1) zscale the first column vector A(1:M-1,0) with 1/A(0,0); 2) Performe a zgeru Operation for trailing matrix of A(1:M-1,1:N-1) += alpha*x*y**T, where alpha := -1.0; x := A(1:M-1,0) and y:= A(0,1:N-1); */ if( m == 0) return 0; size_t all_shmem_size = zamax*(sizeof(double)+sizeof(int)) + (m+2)*sizeof(magmaDoubleComplex); if( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // limit the shared memory to 44K leaving 4K for extra { printf("magma_zcomputecolumn_batched error out of shared memory \n"); return -20; } size_t shared_size = sizeof(magmaDoubleComplex)*m; dim3 grid(1, 1, batchCount); zcomputecolumn_kernel_shared_batched<<< grid, min(m, MAX_NTHREADS), shared_size, queue>>>(m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep); return 0; } ////////////////////////////////////////////////////////////////////////////////////////////////////
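// The batched kernels in the file above all follow the same convention: the grid carries the
// batch index in blockIdx.z and each kernel receives an array of per-matrix device pointers
// (dA_array). A self-contained sketch of that convention, independent of MAGMA's pointer-setup
// helpers, is given below; every name in it is illustrative, not a MAGMA API call, and a
// contiguous batch layout plus n <= 1024 are assumed for brevity.
#include <cuda_runtime.h>
#include <cstdlib>
#include <cstddef>

__global__ void scale_diag_batched(double **dA_array, int lda, int n)
{
    double *A = dA_array[blockIdx.z];             // one matrix per z-block, as in the kernels above
    int i = threadIdx.x;
    if (i < n)
        A[i + i * lda] *= 2.0;                    // touch the diagonal of this batch entry
}

void run_batched(double *dA_flat, int lda, int n, int batchCount, cudaStream_t stream)
{
    // build the per-matrix pointer array on the host and mirror it on the device,
    // which is the layout the dA_array arguments above expect
    double **hA = (double**)malloc(batchCount * sizeof(double*));
    for (int b = 0; b < batchCount; ++b)
        hA[b] = dA_flat + (size_t)b * lda * n;    // contiguous batch assumed

    double **dA_array = NULL;
    cudaMalloc((void**)&dA_array, batchCount * sizeof(double*));
    cudaMemcpy(dA_array, hA, batchCount * sizeof(double*), cudaMemcpyHostToDevice);

    dim3 grid(1, 1, batchCount);                  // batch index carried by blockIdx.z
    scale_diag_batched<<<grid, n, 0, stream>>>(dA_array, lda, n);

    cudaStreamSynchronize(stream);
    cudaFree(dA_array);
    free(hA);
}
////////////////////////////////////////////////////////////////////////////////////////////////////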
37e7272680e1fae093b001e4d1d0f9fe6a759f3c.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "math.hpp" #include "types.hpp" #include "vector_traits.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/kernels/scale_shift.hpp" #include <opencv2/core.hpp> #include <cstddef> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void abs_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::abs; vec.data[j] = abs(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void tanh_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::tanh; vec.data[j] = tanh(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void sigmoid_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::sigmoid; vec.data[j] = sigmoid(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void bnll_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::log1pexp; vec.data[j] = vec.data[j] > T(0) ? vec.data[j] + log1pexp(-vec.data[j]) : log1pexp(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void elu_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::expm1; vec.data[j] = vec.data[j] >= T(0) ? 
vec.data[j] : expm1(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void relu_vec(Span<T> output, View<T> input, T slope) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for(int j = 0; j < vector_type::size(); j++) vec.data[j] = vec.data[j] >= T(0) ? vec.data[j] : slope * vec.data[j]; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void clipped_relu_vec(Span<T> output, View<T> input, T floor, T ceiling) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { using device::clamp; vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = clamp(vec.data[j], floor, ceiling); v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); inner_size /= vector_type::size(); for (auto i : grid_stride_range(output.size() / vector_type::size())) { const index_type c = (i / inner_size) % static_cast<size_type>(slope.size()); vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c]; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void power_vec(Span<T> output, View<T> input, T exp, T scale, T shift) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { using device::pow; vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = pow(shift + scale * vec.data[j], exp); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> void launch_vectorized_abs(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::abs_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void abs(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_abs<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_abs<T, 2>(stream, output, input); } else { launch_vectorized_abs<T, 1>(stream, output, input); } } template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input); template void abs<float>(const Stream& stream, Span<float> output, View<float> input); template <class T, std::size_t N> void launch_vectorized_tanh(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, 
N)); auto kernel = raw::tanh_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void tanh(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_tanh<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_tanh<T, 2>(stream, output, input); } else { launch_vectorized_tanh<T, 1>(stream, output, input); } } template void tanh<__half>(const Stream&, Span<__half>, View<__half>); template void tanh<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_sigmoid(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::sigmoid_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void sigmoid(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_sigmoid<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_sigmoid<T, 2>(stream, output, input); } else { launch_vectorized_sigmoid<T, 1>(stream, output, input); } } template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>); template void sigmoid<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_bnll(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::bnll_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void bnll(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_bnll<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_bnll<T, 2>(stream, output, input); } else { launch_vectorized_bnll<T, 1>(stream, output, input); } } template void bnll<__half>(const Stream&, Span<__half>, View<__half>); template void bnll<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_elu(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::elu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void elu(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_elu<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_elu<T, 2>(stream, output, input); } else { launch_vectorized_elu<T, 1>(stream, output, input); } } template void elu<__half>(const Stream&, Span<__half>, View<__half>); template void elu<float>(const Stream&, Span<float>, 
View<float>); template <class T, std::size_t N> void launch_vectorized_relu(const Stream& stream, Span<T> output, View<T> input, T slope) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, slope); } template <class T> void relu(const Stream& stream, Span<T> output, View<T> input, T slope) { CV_Assert(input.size() == output.size()); if(is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_relu<T, 4>(stream, output, input, slope); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_relu<T, 2>(stream, output, input, slope); } else { launch_vectorized_relu<T, 1>(stream, output, input, slope); } } template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half); template void relu<float>(const Stream&, Span<float>, View<float>, float); template <class T, std::size_t N> void launch_vectorized_clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::clipped_relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, floor, ceiling); } template <class T> void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) { CV_Assert(input.size() == output.size()); CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling)); if(is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_clipped_relu<T, 4>(stream, output, input, floor, ceiling); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_clipped_relu<T, 2>(stream, output, input, floor, ceiling); } else { launch_vectorized_clipped_relu<T, 1>(stream, output, input, floor, ceiling); } } template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float); template <class T, std::size_t N> void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); CV_Assert(inner_size % N == 0); auto kernel = raw::axiswise_relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, inner_size, slope); } template <class T> void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) { launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) { launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope); } else { launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope); } } template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>); template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>); template <class T, std::size_t N> void 
launch_vectorized_power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::power_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, exp, scale, shift); } template <class T> void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) { CV_Assert(input.size() == output.size()); if (static_cast<float>(exp) == 1.0f) { scale1_with_bias1(stream, output, input, scale, shift); return; } if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && output.size()) { launch_vectorized_power<T, 4>(stream, output, input, exp, scale, shift); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && output.size()) { launch_vectorized_power<T, 2>(stream, output, input, exp, scale, shift); } else { launch_vectorized_power<T, 1>(stream, output, input, exp, scale, shift); } } template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); template void power<float>(const Stream&, Span<float>, View<float>, float, float, float); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
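// The activation kernels above iterate with a grid_stride_range helper and move N elements per
// iteration through get_vector_type_t / v_load / v_store. A bare-CUDA sketch of the same
// pattern, written without the cuda4dnn helpers, is given below for the ReLU case; it is an
// illustration of the technique, not the library implementation.
#include <cuda_runtime.h>
#include <cstddef>

__global__ void relu_float4(float4 *out, const float4 *in, size_t n_vec, float slope)
{
    // classic grid-stride loop: each thread walks the whole range in grid-sized strides
    for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n_vec;
         i += (size_t)gridDim.x * blockDim.x)
    {
        float4 v = in[i];                          // one 128-bit load covers four elements
        v.x = v.x >= 0.f ? v.x : slope * v.x;
        v.y = v.y >= 0.f ? v.y : slope * v.y;
        v.z = v.z >= 0.f ? v.z : slope * v.z;
        v.w = v.w >= 0.f ? v.w : slope * v.w;
        out[i] = v;                                // one 128-bit store writes them back
    }
}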
37e7272680e1fae093b001e4d1d0f9fe6a759f3c.cu
// This file is part of OpenCV project. // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. #include <cuda_runtime.h> #include <cuda_fp16.h> #include "math.hpp" #include "types.hpp" #include "vector_traits.hpp" #include "grid_stride_range.hpp" #include "execution.hpp" #include "../cuda4dnn/csl/stream.hpp" #include "../cuda4dnn/csl/span.hpp" #include "../cuda4dnn/kernels/scale_shift.hpp" #include <opencv2/core.hpp> #include <cstddef> using namespace cv::dnn::cuda4dnn::csl; using namespace cv::dnn::cuda4dnn::csl::device; namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels { namespace raw { template <class T, std::size_t N> __global__ void abs_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::abs; vec.data[j] = abs(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void tanh_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::tanh; vec.data[j] = tanh(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void sigmoid_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::sigmoid; vec.data[j] = sigmoid(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void bnll_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::log1pexp; vec.data[j] = vec.data[j] > T(0) ? vec.data[j] + log1pexp(-vec.data[j]) : log1pexp(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void elu_vec(Span<T> output, View<T> input) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) { using device::expm1; vec.data[j] = vec.data[j] >= T(0) ? 
vec.data[j] : expm1(vec.data[j]); } v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void relu_vec(Span<T> output, View<T> input, T slope) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { vector_type vec; v_load(vec, input_vPtr[i]); for(int j = 0; j < vector_type::size(); j++) vec.data[j] = vec.data[j] >= T(0) ? vec.data[j] : slope * vec.data[j]; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void clipped_relu_vec(Span<T> output, View<T> input, T floor, T ceiling) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { using device::clamp; vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = clamp(vec.data[j], floor, ceiling); v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void axiswise_relu_vec(Span<T> output, View<T> input, size_type inner_size, View<T> slope) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); inner_size /= vector_type::size(); for (auto i : grid_stride_range(output.size() / vector_type::size())) { const index_type c = (i / inner_size) % static_cast<size_type>(slope.size()); vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = vec.data[j] > T(0) ? vec.data[j] : vec.data[j] * slope[c]; v_store(output_vPtr[i], vec); } } template <class T, std::size_t N> __global__ void power_vec(Span<T> output, View<T> input, T exp, T scale, T shift) { using vector_type = get_vector_type_t<T, N>; auto output_vPtr = vector_type::get_pointer(output.data()); auto input_vPtr = vector_type::get_pointer(input.data()); for (auto i : grid_stride_range(output.size() / vector_type::size())) { using device::pow; vector_type vec; v_load(vec, input_vPtr[i]); for (int j = 0; j < vector_type::size(); j++) vec.data[j] = pow(shift + scale * vec.data[j], exp); v_store(output_vPtr[i], vec); } } } template <class T, std::size_t N> void launch_vectorized_abs(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::abs_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void abs(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_abs<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_abs<T, 2>(stream, output, input); } else { launch_vectorized_abs<T, 1>(stream, output, input); } } template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input); template void abs<float>(const Stream& stream, Span<float> output, View<float> input); template <class T, std::size_t N> void launch_vectorized_tanh(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, 
N)); auto kernel = raw::tanh_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void tanh(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_tanh<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_tanh<T, 2>(stream, output, input); } else { launch_vectorized_tanh<T, 1>(stream, output, input); } } template void tanh<__half>(const Stream&, Span<__half>, View<__half>); template void tanh<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_sigmoid(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::sigmoid_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void sigmoid(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_sigmoid<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_sigmoid<T, 2>(stream, output, input); } else { launch_vectorized_sigmoid<T, 1>(stream, output, input); } } template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>); template void sigmoid<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_bnll(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::bnll_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void bnll(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_bnll<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_bnll<T, 2>(stream, output, input); } else { launch_vectorized_bnll<T, 1>(stream, output, input); } } template void bnll<__half>(const Stream&, Span<__half>, View<__half>); template void bnll<float>(const Stream&, Span<float>, View<float>); template <class T, std::size_t N> void launch_vectorized_elu(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::elu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input); } template <class T> void elu(const Stream& stream, Span<T> output, View<T> input) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_elu<T, 4>(stream, output, input); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_elu<T, 2>(stream, output, input); } else { launch_vectorized_elu<T, 1>(stream, output, input); } } template void elu<__half>(const Stream&, Span<__half>, View<__half>); template void elu<float>(const Stream&, Span<float>, 
View<float>); template <class T, std::size_t N> void launch_vectorized_relu(const Stream& stream, Span<T> output, View<T> input, T slope) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, slope); } template <class T> void relu(const Stream& stream, Span<T> output, View<T> input, T slope) { CV_Assert(input.size() == output.size()); if(is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_relu<T, 4>(stream, output, input, slope); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_relu<T, 2>(stream, output, input, slope); } else { launch_vectorized_relu<T, 1>(stream, output, input, slope); } } template void relu<__half>(const Stream&, Span<__half>, View<__half>, __half); template void relu<float>(const Stream&, Span<float>, View<float>, float); template <class T, std::size_t N> void launch_vectorized_clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::clipped_relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, floor, ceiling); } template <class T> void clipped_relu(const Stream& stream, Span<T> output, View<T> input, T floor, T ceiling) { CV_Assert(input.size() == output.size()); CV_Assert(static_cast<double>(floor) <= static_cast<double>(ceiling)); if(is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4)) { launch_vectorized_clipped_relu<T, 4>(stream, output, input, floor, ceiling); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2)) { launch_vectorized_clipped_relu<T, 2>(stream, output, input, floor, ceiling); } else { launch_vectorized_clipped_relu<T, 1>(stream, output, input, floor, ceiling); } } template void clipped_relu<__half>(const Stream&, Span<__half>, View<__half>, __half, __half); template void clipped_relu<float>(const Stream&, Span<float>, View<float>, float, float); template <class T, std::size_t N> void launch_vectorized_axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); CV_Assert(inner_size % N == 0); auto kernel = raw::axiswise_relu_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, inner_size, slope); } template <class T> void axiswise_relu(const Stream& stream, Span<T> output, View<T> input, std::size_t inner_size, View<T> slope) { CV_Assert(input.size() == output.size()); if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && inner_size % 4 == 0) { launch_vectorized_axiswise_relu<T, 4>(stream, output, input, inner_size, slope); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && inner_size % 2 == 0) { launch_vectorized_axiswise_relu<T, 2>(stream, output, input, inner_size, slope); } else { launch_vectorized_axiswise_relu<T, 1>(stream, output, input, inner_size, slope); } } template void axiswise_relu<__half>(const Stream&, Span<__half>, View<__half>, std::size_t, View<__half>); template void axiswise_relu<float>(const Stream&, Span<float>, View<float>, std::size_t, View<float>); template <class T, std::size_t N> void 
launch_vectorized_power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) { CV_Assert(is_fully_aligned<T>(output, N)); CV_Assert(is_fully_aligned<T>(input, N)); auto kernel = raw::power_vec<T, N>; auto policy = make_policy(kernel, output.size() / N, 0, stream); launch_kernel(kernel, policy, output, input, exp, scale, shift); } template <class T> void power(const Stream& stream, Span<T> output, View<T> input, T exp, T scale, T shift) { CV_Assert(input.size() == output.size()); if (static_cast<float>(exp) == 1.0f) { scale1_with_bias1(stream, output, input, scale, shift); return; } if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(input, 4) && output.size()) { launch_vectorized_power<T, 4>(stream, output, input, exp, scale, shift); } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(input, 2) && output.size()) { launch_vectorized_power<T, 2>(stream, output, input, exp, scale, shift); } else { launch_vectorized_power<T, 1>(stream, output, input, exp, scale, shift); } } template void power<__half>(const Stream&, Span<__half>, View<__half>, __half, __half, __half); template void power<float>(const Stream&, Span<float>, View<float>, float, float, float); }}}} /* namespace cv::dnn::cuda4dnn::kernels */
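// The host wrappers above (abs, tanh, relu, ...) dispatch to a vector width of 4, then 2, then
// fall back to scalar, guarded by is_fully_aligned<T>(span, N). A minimal stand-alone version
// of that alignment test is sketched below; the function name is illustrative and this is not
// the cuda4dnn helper itself.
#include <cstdint>
#include <cstddef>

template <class T>
bool fully_aligned(const T *ptr, std::size_t count, std::size_t N)
{
    // the pointer must sit on an N*sizeof(T) boundary and the element count must be a
    // multiple of N, otherwise the N-wide kernel would issue misaligned vector accesses
    // or run past the end of the buffer
    return (reinterpret_cast<std::uintptr_t>(ptr) % (N * sizeof(T)) == 0)
        && (count % N == 0);
}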
2215d8f5c581ad55df3c0b9808bce1facf413236.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <sys/time.h> #include <string.h> using namespace std; __global__ void MatMul(double * A, double * B ,double * C,int m,int n,int k) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; if (i < m && j < k){ for(int x = 0;x < n;x++){ sum += A[i * n + x] * B[x * k + j]; } C[i * k + j] = sum; } } void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){ for(int i = 0;i < m;i++){ for(int j = 0;j < k;j++){ for(int x = 0;x < n;x++){ C[i * k + j] += A[i * n + x] * B[x * k + j]; } } } } int main() { timeval t1, t2; int x,y; cout << "Input threadsPerBlock.x:"; cin >> x; cout << "Input threadsPerBlock.y:"; cin >> y; dim3 threadsPerBlock(x,y); int m, n, k; cout << "Input problem size:"; cin >> m; n = m; k = m; dim3 numBlocks((m % threadsPerBlock.x) ? m / threadsPerBlock.x + 1 : m / threadsPerBlock.x ,(k % threadsPerBlock.y) ? k / threadsPerBlock.y + 1 : k / threadsPerBlock.y); double *A,*B,*C,*C1; A = (double*)malloc(sizeof(double) * m * n); B = (double*)malloc(sizeof(double) * k * n); C = (double*)malloc(sizeof(double) * m * k); C1 = (double*)malloc(sizeof(double) * m * k); for(int i = 0;i < m;i++){ for(int j = 0;j < n;j++){ A[i * n + j] = rand() % 10; } } for(int i = 0;i < n;i++){ for(int j = 0;j < k;j++){ B[i * k + j] = rand() % 10; } } memset(C,0,sizeof(C)); memset(C1,0,sizeof(C)); double * d_A,*d_B,*d_C; gettimeofday(&t1, NULL); hipMalloc(&d_A, sizeof(double) * m * n); hipMalloc(&d_B,sizeof(double) * n * k); hipMalloc(&d_C,sizeof(double) * m * k); hipMemcpy(d_A, A, sizeof(double) * m * n, hipMemcpyHostToDevice); hipMemcpy(d_B, B, sizeof(double) * n * k, hipMemcpyHostToDevice); hipLaunchKernelGGL(( MatMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, m, n, k); hipMemcpy(C, d_C, sizeof(double) * m * k, hipMemcpyDeviceToHost); gettimeofday(&t2, NULL); printf("GPU time is:%lds\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec); gettimeofday(&t1, NULL); CPU_MatMul(A,B,C1,m,n,k); gettimeofday(&t2, NULL); printf("CPU time is:%lds\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec); int flag = 0; for(int i = 0;i < m * k;i++){ if(fabs((C[i] - C1[i])) > 1e-4){ flag = 1; break; } } if(flag){ cout << "Wrong result." << endl; } else { cout << "The results are correct. " << endl; } hipFree(d_A); hipFree(d_B); hipFree(d_C); free(A); free(B); free(C); free(C1); }
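// Host-side note on the MatMul example above: C and C1 are heap pointers, so
// memset(C, 0, sizeof(C)) and memset(C1, 0, sizeof(C)) clear only sizeof(double*) bytes each.
// The GPU path overwrites C entirely via the device-to-host copy, but the CPU reference
// CPU_MatMul accumulates into C1 with +=, so C1 needs a full clear. A sketch of the intended
// initialization follows; the helper name is illustrative.
#include <cstring>
#include <cstddef>

static void zero_result_buffers(double *C, double *C1, int m, int k)
{
    std::memset(C,  0, sizeof(double) * (std::size_t)m * k);
    std::memset(C1, 0, sizeof(double) * (std::size_t)m * k);
}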
2215d8f5c581ad55df3c0b9808bce1facf413236.cu
#include <iostream> #include <stdio.h> #include <sys/time.h> #include <string.h> using namespace std; __global__ void MatMul(double * A, double * B ,double * C,int m,int n,int k) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; double sum = 0; if (i < m && j < k){ for(int x = 0;x < n;x++){ sum += A[i * n + x] * B[x * k + j]; } C[i * k + j] = sum; } } void CPU_MatMul(double * A, double * B ,double * C,int m,int n,int k){ for(int i = 0;i < m;i++){ for(int j = 0;j < k;j++){ for(int x = 0;x < n;x++){ C[i * k + j] += A[i * n + x] * B[x * k + j]; } } } } int main() { timeval t1, t2; int x,y; cout << "Input threadsPerBlock.x:"; cin >> x; cout << "Input threadsPerBlock.y:"; cin >> y; dim3 threadsPerBlock(x,y); int m, n, k; cout << "Input problem size:"; cin >> m; n = m; k = m; dim3 numBlocks((m % threadsPerBlock.x) ? m / threadsPerBlock.x + 1 : m / threadsPerBlock.x ,(k % threadsPerBlock.y) ? k / threadsPerBlock.y + 1 : k / threadsPerBlock.y); double *A,*B,*C,*C1; A = (double*)malloc(sizeof(double) * m * n); B = (double*)malloc(sizeof(double) * k * n); C = (double*)malloc(sizeof(double) * m * k); C1 = (double*)malloc(sizeof(double) * m * k); for(int i = 0;i < m;i++){ for(int j = 0;j < n;j++){ A[i * n + j] = rand() % 10; } } for(int i = 0;i < n;i++){ for(int j = 0;j < k;j++){ B[i * k + j] = rand() % 10; } } memset(C,0,sizeof(C)); memset(C1,0,sizeof(C)); double * d_A,*d_B,*d_C; gettimeofday(&t1, NULL); cudaMalloc(&d_A, sizeof(double) * m * n); cudaMalloc(&d_B,sizeof(double) * n * k); cudaMalloc(&d_C,sizeof(double) * m * k); cudaMemcpy(d_A, A, sizeof(double) * m * n, cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, sizeof(double) * n * k, cudaMemcpyHostToDevice); MatMul<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, m, n, k); cudaMemcpy(C, d_C, sizeof(double) * m * k, cudaMemcpyDeviceToHost); gettimeofday(&t2, NULL); printf("GPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec); gettimeofday(&t1, NULL); CPU_MatMul(A,B,C1,m,n,k); gettimeofday(&t2, NULL); printf("CPU time is:%ldμs\n",t2.tv_sec*1000000 + t2.tv_usec - t1.tv_sec*1000000 - t1.tv_usec); int flag = 0; for(int i = 0;i < m * k;i++){ if(fabs((C[i] - C1[i])) > 1e-4){ flag = 1; break; } } if(flag){ cout << "Wrong result." << endl; } else { cout << "The results are correct. " << endl; } cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(A); free(B); free(C); free(C1); }
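// The MatMul kernel above reads each element of A and B from global memory once per inner-loop
// iteration. A common refinement, sketched below and not part of the files above, stages
// TILE x TILE blocks of A and B in shared memory so each global element is reused TILE times;
// it keeps the same row/column convention (threadIdx.x indexes rows, threadIdx.y columns) and
// assumes a dim3(TILE, TILE) block at launch.
#include <cuda_runtime.h>

#define TILE 16

__global__ void MatMulTiled(const double *A, const double *B, double *C,
                            int m, int n, int k)
{
    __shared__ double As[TILE][TILE];
    __shared__ double Bs[TILE][TILE];

    int row = blockIdx.x * TILE + threadIdx.x;
    int col = blockIdx.y * TILE + threadIdx.y;
    double sum = 0.0;

    for (int t = 0; t < (n + TILE - 1) / TILE; ++t) {
        int a_col = t * TILE + threadIdx.y;
        int b_row = t * TILE + threadIdx.x;
        // stage one tile of A and one tile of B, padding with zeros at the edges
        As[threadIdx.x][threadIdx.y] = (row < m && a_col < n) ? A[row * n + a_col] : 0.0;
        Bs[threadIdx.x][threadIdx.y] = (b_row < n && col < k) ? B[b_row * k + col] : 0.0;
        __syncthreads();

        for (int x = 0; x < TILE; ++x)
            sum += As[threadIdx.x][x] * Bs[x][threadIdx.y];
        __syncthreads();
    }
    if (row < m && col < k)
        C[row * k + col] = sum;
}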
788f0880982791c7f1f0b829f3e552dfa01512d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_initialguess; int xdim0_poisson_kernel_initialguess_h = -1; int ydim0_poisson_kernel_initialguess_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x, y) (x + xdim0_poisson_kernel_initialguess * (y)) // user function __device__ void poisson_kernel_initialguess_gpu(double *u) { u[OPS_ACC0(0, 0)] = 0.0; } #undef OPS_ACC0 __global__ void ops_poisson_kernel_initialguess(double *__restrict arg0, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_poisson_kernel_initialguess; if (idx_x < size0 && idx_y < size1) { poisson_kernel_initialguess_gpu(arg0); } } // host stub function void ops_par_loop_poisson_kernel_initialguess(char const *name, ops_block block, int dim, int *range, ops_arg arg0) { // Timing double t1, t2, c1, c2; ops_arg args[1] = {arg0}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 1, range, 2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2, "poisson_kernel_initialguess"); OPS_kernels[2].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_poisson_kernel_initialguess_h) { hipMemcpyToSymbol(xdim0_poisson_kernel_initialguess, &xdim0, sizeof(int)); xdim0_poisson_kernel_initialguess_h = xdim0; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; char *p_a[1]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; ops_H_D_exchanges_device(args, 1); ops_halo_exchanges(args, 1, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_poisson_kernel_initialguess), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], x_size, y_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[2].time += t1 - t2; } ops_set_dirtybit_device(args, 
1); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } }
788f0880982791c7f1f0b829f3e552dfa01512d4.cu
// // auto-generated by ops.py // __constant__ int xdim0_poisson_kernel_initialguess; int xdim0_poisson_kernel_initialguess_h = -1; int ydim0_poisson_kernel_initialguess_h = -1; #undef OPS_ACC0 #define OPS_ACC0(x, y) (x + xdim0_poisson_kernel_initialguess * (y)) // user function __device__ void poisson_kernel_initialguess_gpu(double *u) { u[OPS_ACC0(0, 0)] = 0.0; } #undef OPS_ACC0 __global__ void ops_poisson_kernel_initialguess(double *__restrict arg0, int size0, int size1) { int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_poisson_kernel_initialguess; if (idx_x < size0 && idx_y < size1) { poisson_kernel_initialguess_gpu(arg0); } } // host stub function void ops_par_loop_poisson_kernel_initialguess(char const *name, ops_block block, int dim, int *range, ops_arg arg0) { // Timing double t1, t2, c1, c2; ops_arg args[1] = {arg0}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 1, range, 2)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(2, "poisson_kernel_initialguess"); OPS_kernels[2].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 2; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 2; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int xdim0 = args[0].dat->size[0]; if (xdim0 != xdim0_poisson_kernel_initialguess_h) { cudaMemcpyToSymbol(xdim0_poisson_kernel_initialguess, &xdim0, sizeof(int)); xdim0_poisson_kernel_initialguess_h = xdim0; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; char *p_a[1]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; ops_H_D_exchanges_device(args, 1); ops_halo_exchanges(args, 1, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[2].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_poisson_kernel_initialguess<<<grid, tblock>>>((double *)p_a[0], x_size, y_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[2].time += t1 - t2; } ops_set_dirtybit_device(args, 1); ops_set_halo_dirtybit3(&args[0], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); 
OPS_kernels[2].mpi_time += t2 - t1; OPS_kernels[2].transfer += ops_compute_transfer(dim, start, end, &arg0); } }
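// --- Editor's note: the pair above caches the row pitch of dat 0 on the host
// (xdim0_poisson_kernel_initialguess_h) and only re-uploads the __constant__ copy when it
// changes between launches. A minimal, self-contained CUDA sketch of that pattern;
// the names xdim0_demo / set_row_pitch / read_pitch are illustrative, not OPS API. ---
#include <cuda_runtime.h>
#include <cstdio>

__constant__ int xdim0_demo;   // device-side row pitch read by kernels
static int xdim0_demo_h = -1;  // host shadow of the last value uploaded

static void set_row_pitch(int xdim0) {
  if (xdim0 != xdim0_demo_h) { // skip the symbol copy when the pitch is unchanged
    cudaMemcpyToSymbol(xdim0_demo, &xdim0, sizeof(int));
    xdim0_demo_h = xdim0;
  }
}

__global__ void read_pitch(int *out) { *out = xdim0_demo; }

int main() {
  int *d_out = nullptr, h_out = 0;
  cudaMalloc(&d_out, sizeof(int));
  set_row_pitch(128);          // first call uploads the symbol
  set_row_pitch(128);          // second call is a no-op
  read_pitch<<<1, 1>>>(d_out);
  cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
  printf("pitch seen by kernel: %d\n", h_out);
  cudaFree(d_out);
  return 0;
}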
c5c2cf655ba9fe1d8f008d4fc3cfd319bbfac2b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Image.cpp * * Created on: 6 gru 2015 * Author: pSolT */ #include "Image.cuh" Image::Image() { } Image::~Image() { free(h_greyImage__); free(h_rgbaImage__); hipFree(d_rgbaImage__); hipFree(d_greyImage__); } Image& Image::ApplyRGBAFilter(RGBAFilter * filter) { int numPixels = GetColumnsCount() * GetRowsCount(); uchar4 * result; hipMalloc(&result, sizeof(uchar4) * numPixels); hipMemcpy(d_rgbaImage__, h_rgbaImage__, sizeof(uchar4)*numPixels, hipMemcpyHostToDevice); filter->Apply(d_rgbaImage__, result, GetRowsCount(), GetColumnsCount()); hipMemcpy(h_rgbaImage__, result, sizeof(uchar4)*numPixels, hipMemcpyDeviceToHost); return *this; } Image& Image::ApplyGreyscaleFilter(GreyscaleFilter * filter) { int numPixels = GetColumnsCount() * GetRowsCount(); unsigned char * result; hipMalloc(&result, sizeof(unsigned char) * numPixels); hipMemcpy(d_greyImage__, h_greyImage__, sizeof(unsigned char)*numPixels, hipMemcpyHostToDevice); filter->Apply(d_greyImage__, result, GetRowsCount(), GetColumnsCount()); hipMemcpy(h_greyImage__, result, sizeof(unsigned char)*numPixels, hipMemcpyDeviceToHost); return *this; } Image* Image::Load(const std::string &filename) { Image * result = new Image(); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } // Convert image from default OpenCV color space to RGBA cv::cvtColor(image, result->imageRGBA, CV_BGR2RGBA); //allocate memory for the output result->imageGrey.create(image.rows, image.cols, CV_8UC1); //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!result->imageRGBA.isContinuous() || !result->imageGrey.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." 
<< std::endl; exit(1); } result->h_rgbaImage__ = (uchar4 *) result->imageRGBA.ptr<unsigned char>(0); result->h_greyImage__ = result->imageGrey.ptr<unsigned char>(0); const size_t numPixels = result->GetRowsCount() * result->GetColumnsCount(); //allocate memory on the device for both input and output checkCudaErrors(hipMalloc(&result->d_rgbaImage__, sizeof(uchar4) * numPixels)); checkCudaErrors(hipMalloc(&result->d_greyImage__, sizeof(unsigned char) * numPixels)); //copy input array to the GPU checkCudaErrors( hipMemcpy(result->d_rgbaImage__, result->h_rgbaImage__, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(result->d_greyImage__, result->h_greyImage__, sizeof(unsigned char) * numPixels, hipMemcpyHostToDevice)); result->CreateGreyscale(); return result; } __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { int blockId = gridDim.x * blockIdx.y + blockIdx.x; int i = blockId * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; if(i < numRows * numCols) { greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z; } } void Image::CreateGreyscale() { const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize( 32, 32, 1); //TODO hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, this->d_rgbaImage__, this->d_greyImage__, this->GetRowsCount(), this->GetColumnsCount()); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); checkCudaErrors( hipMemcpy(this->h_greyImage__, this->d_greyImage__, sizeof(unsigned char) * this->GetRowsCount() * this->GetColumnsCount(), hipMemcpyDeviceToHost)); } void Image::SaveGrayscale(const std::string &filename) { cv::Mat output(GetRowsCount(), GetColumnsCount(), CV_8UC1, (void*) h_greyImage__); //output the image cv::imwrite(filename.c_str(), output); } void Image::SaveRGBA(const std::string &filename) { cv::Mat output(GetRowsCount(), GetColumnsCount(), CV_8UC4, (void*) h_rgbaImage__); bool cont = output.isContinuous(); bool empt = output.empty(); int channels = output.channels(); cv::Mat bgr; cv::cvtColor(output, output, CV_RGB2BGR); //output the image cv::imwrite(filename.c_str(), output); } size_t Image::GetRowsCount() const { return imageRGBA.rows; } size_t Image::GetColumnsCount() const { return imageRGBA.cols; } uchar4 * Image::HostRGBA() const { return h_rgbaImage__; } uchar4 * Image::DeviceRGBA() const { return d_rgbaImage__; } unsigned char * Image::HostGreyscale() const { return h_greyImage__; } unsigned char * Image::DeviceGreyscale() const { return d_greyImage__; }
c5c2cf655ba9fe1d8f008d4fc3cfd319bbfac2b5.cu
/* * Image.cpp * * Created on: 6 gru 2015 * Author: pSolT */ #include "Image.cuh" Image::Image() { } Image::~Image() { free(h_greyImage__); free(h_rgbaImage__); cudaFree(d_rgbaImage__); cudaFree(d_greyImage__); } Image& Image::ApplyRGBAFilter(RGBAFilter * filter) { int numPixels = GetColumnsCount() * GetRowsCount(); uchar4 * result; cudaMalloc(&result, sizeof(uchar4) * numPixels); cudaMemcpy(d_rgbaImage__, h_rgbaImage__, sizeof(uchar4)*numPixels, cudaMemcpyHostToDevice); filter->Apply(d_rgbaImage__, result, GetRowsCount(), GetColumnsCount()); cudaMemcpy(h_rgbaImage__, result, sizeof(uchar4)*numPixels, cudaMemcpyDeviceToHost); return *this; } Image& Image::ApplyGreyscaleFilter(GreyscaleFilter * filter) { int numPixels = GetColumnsCount() * GetRowsCount(); unsigned char * result; cudaMalloc(&result, sizeof(unsigned char) * numPixels); cudaMemcpy(d_greyImage__, h_greyImage__, sizeof(unsigned char)*numPixels, cudaMemcpyHostToDevice); filter->Apply(d_greyImage__, result, GetRowsCount(), GetColumnsCount()); cudaMemcpy(h_greyImage__, result, sizeof(unsigned char)*numPixels, cudaMemcpyDeviceToHost); return *this; } Image* Image::Load(const std::string &filename) { Image * result = new Image(); cv::Mat image; image = cv::imread(filename.c_str(), CV_LOAD_IMAGE_COLOR); if (image.empty()) { std::cerr << "Couldn't open file: " << filename << std::endl; exit(1); } // Convert image from default OpenCV color space to RGBA cv::cvtColor(image, result->imageRGBA, CV_BGR2RGBA); //allocate memory for the output result->imageGrey.create(image.rows, image.cols, CV_8UC1); //This shouldn't ever happen given the way the images are created //at least based upon my limited understanding of OpenCV, but better to check if (!result->imageRGBA.isContinuous() || !result->imageGrey.isContinuous()) { std::cerr << "Images aren't continuous!! Exiting." 
<< std::endl; exit(1); } result->h_rgbaImage__ = (uchar4 *) result->imageRGBA.ptr<unsigned char>(0); result->h_greyImage__ = result->imageGrey.ptr<unsigned char>(0); const size_t numPixels = result->GetRowsCount() * result->GetColumnsCount(); //allocate memory on the device for both input and output checkCudaErrors(cudaMalloc(&result->d_rgbaImage__, sizeof(uchar4) * numPixels)); checkCudaErrors(cudaMalloc(&result->d_greyImage__, sizeof(unsigned char) * numPixels)); //copy input array to the GPU checkCudaErrors( cudaMemcpy(result->d_rgbaImage__, result->h_rgbaImage__, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(result->d_greyImage__, result->h_greyImage__, sizeof(unsigned char) * numPixels, cudaMemcpyHostToDevice)); result->CreateGreyscale(); return result; } __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { int blockId = gridDim.x * blockIdx.y + blockIdx.x; int i = blockId * blockDim.x * blockDim.y + blockDim.x * threadIdx.y + threadIdx.x; if(i < numRows * numCols) { greyImage[i] = .299f * rgbaImage[i].x + .587f * rgbaImage[i].y + .114f * rgbaImage[i].z; } } void Image::CreateGreyscale() { const dim3 blockSize(32, 32, 1); //TODO const dim3 gridSize( 32, 32, 1); //TODO rgba_to_greyscale<<<gridSize, blockSize>>>(this->d_rgbaImage__, this->d_greyImage__, this->GetRowsCount(), this->GetColumnsCount()); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); checkCudaErrors( cudaMemcpy(this->h_greyImage__, this->d_greyImage__, sizeof(unsigned char) * this->GetRowsCount() * this->GetColumnsCount(), cudaMemcpyDeviceToHost)); } void Image::SaveGrayscale(const std::string &filename) { cv::Mat output(GetRowsCount(), GetColumnsCount(), CV_8UC1, (void*) h_greyImage__); //output the image cv::imwrite(filename.c_str(), output); } void Image::SaveRGBA(const std::string &filename) { cv::Mat output(GetRowsCount(), GetColumnsCount(), CV_8UC4, (void*) h_rgbaImage__); bool cont = output.isContinuous(); bool empt = output.empty(); int channels = output.channels(); cv::Mat bgr; cv::cvtColor(output, output, CV_RGB2BGR); //output the image cv::imwrite(filename.c_str(), output); } size_t Image::GetRowsCount() const { return imageRGBA.rows; } size_t Image::GetColumnsCount() const { return imageRGBA.cols; } uchar4 * Image::HostRGBA() const { return h_rgbaImage__; } uchar4 * Image::DeviceRGBA() const { return d_rgbaImage__; } unsigned char * Image::HostGreyscale() const { return h_greyImage__; } unsigned char * Image::DeviceGreyscale() const { return d_greyImage__; }
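// --- Editor's note: CreateGreyscale in the pair above launches rgba_to_greyscale with a
// fixed gridSize(32, 32, 1) marked TODO, which covers at most 32*32*1024 pixels. A small
// CUDA sketch of deriving a grid from the image size so the kernel's flattened index
// (blockId * blockDim.x * blockDim.y + ...) reaches every pixel; grid_for_pixels is an
// illustrative helper, not part of Image.cuh. ---
#include <cuda_runtime.h>
#include <algorithm>
#include <cstdio>

static dim3 grid_for_pixels(size_t numRows, size_t numCols, dim3 block) {
  size_t total = numRows * numCols;
  size_t perBlock = (size_t)block.x * block.y;              // 1024 threads for a 32x32 block
  size_t blocksNeeded = (total + perBlock - 1) / perBlock;  // round up
  if (blocksNeeded == 0) blocksNeeded = 1;
  // keep the grid 2D, as the kernel's blockId = gridDim.x * blockIdx.y + blockIdx.x expects
  unsigned int gx = (unsigned int)std::min<size_t>(blocksNeeded, 65535);
  unsigned int gy = (unsigned int)((blocksNeeded + gx - 1) / gx);
  return dim3(gx, gy, 1);
}

int main() {
  dim3 block(32, 32, 1);
  dim3 grid = grid_for_pixels(1080, 1920, block);   // e.g. a 1920x1080 image
  printf("grid = (%u, %u, %u)\n", grid.x, grid.y, grid.z);
  return 0;
}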
070ed2da559d76af7eeeb3a2bb0bd13d887741a8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void set_arr(float b, float * c, int N) { int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx>=N) return; c[idx]=b; }
070ed2da559d76af7eeeb3a2bb0bd13d887741a8.cu
#include "includes.h" __global__ void set_arr(float b, float * c, int N) { int idx=blockIdx.x*blockDim.x+threadIdx.x; if(idx>=N) return; c[idx]=b; }
c716ca218c550ba2ef20c2eb96bb5f7a6d504383.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "ApplyCorrections.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *g_pSLM_uc = NULL; hipMalloc(&g_pSLM_uc, XSIZE*YSIZE); unsigned char *g_LUT = NULL; hipMalloc(&g_LUT, XSIZE*YSIZE); float *g_AberrationCorr_f = NULL; hipMalloc(&g_AberrationCorr_f, XSIZE*YSIZE); float *g_LUTPolCoeff_f = NULL; hipMalloc(&g_LUTPolCoeff_f, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( ApplyCorrections), dim3(gridBlock),dim3(threadBlock), 0, 0, g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( ApplyCorrections), dim3(gridBlock),dim3(threadBlock), 0, 0, g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( ApplyCorrections), dim3(gridBlock),dim3(threadBlock), 0, 0, g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
c716ca218c550ba2ef20c2eb96bb5f7a6d504383.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "ApplyCorrections.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *g_pSLM_uc = NULL; cudaMalloc(&g_pSLM_uc, XSIZE*YSIZE); unsigned char *g_LUT = NULL; cudaMalloc(&g_LUT, XSIZE*YSIZE); float *g_AberrationCorr_f = NULL; cudaMalloc(&g_AberrationCorr_f, XSIZE*YSIZE); float *g_LUTPolCoeff_f = NULL; cudaMalloc(&g_LUTPolCoeff_f, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); ApplyCorrections<<<gridBlock,threadBlock>>>(g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { ApplyCorrections<<<gridBlock,threadBlock>>>(g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { ApplyCorrections<<<gridBlock,threadBlock>>>(g_pSLM_uc,g_LUT,g_AberrationCorr_f,g_LUTPolCoeff_f); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
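// --- Editor's note: the generated harness above records steady_clock around 1000 kernel
// launches without synchronizing before the end timestamp, so it mostly measures
// launch/enqueue cost rather than execution time. A CUDA sketch of event-based timing
// that waits for the work to drain; dummy_kernel, the sizes and the loop count are
// illustrative only. ---
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel(float *p, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = p[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float *d_p = nullptr;
  cudaMalloc(&d_p, n * sizeof(float));
  dim3 block(256), grid((n + 255) / 256);

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  dummy_kernel<<<grid, block>>>(d_p, n);   // warm-up launch
  cudaDeviceSynchronize();

  cudaEventRecord(start);
  for (int i = 0; i < 1000; ++i)
    dummy_kernel<<<grid, block>>>(d_p, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);              // wait until all 1000 launches have finished

  float ms = 0.f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("avg time per launch: %f ms\n", ms / 1000.0f);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_p);
  return 0;
}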
cc7f835c0848a96db6992659b4ee686784748b36.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Copy3DKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const unsigned short *d_src = NULL; hipMalloc(&d_src, XSIZE*YSIZE); float *d_dst = NULL; hipMalloc(&d_dst, XSIZE*YSIZE); float min_intensity = 1; const int width = 1; const int height = 1; const int depth = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Copy3DKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_src,d_dst,min_intensity,width,height,depth); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Copy3DKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_src,d_dst,min_intensity,width,height,depth); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Copy3DKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_src,d_dst,min_intensity,width,height,depth); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
cc7f835c0848a96db6992659b4ee686784748b36.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Copy3DKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const unsigned short *d_src = NULL; cudaMalloc(&d_src, XSIZE*YSIZE); float *d_dst = NULL; cudaMalloc(&d_dst, XSIZE*YSIZE); float min_intensity = 1; const int width = 1; const int height = 1; const int depth = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Copy3DKernel<<<gridBlock,threadBlock>>>(d_src,d_dst,min_intensity,width,height,depth); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Copy3DKernel<<<gridBlock,threadBlock>>>(d_src,d_dst,min_intensity,width,height,depth); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Copy3DKernel<<<gridBlock,threadBlock>>>(d_src,d_dst,min_intensity,width,height,depth); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
977ca89a3339e05dac1ccfe45b4f73c4105357d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <helper_functions.h> // helper functions for string parsing #include <helper_cuda.h> // helper functions CUDA error checking and initialization const char* path = "/home/tnallen/dev/ics2017/src/test/bad/test.cu"; const char* kernel = "clock_block"; __global__ void clock_block(clock_t* d_o, volatile long clock_count) { volatile long clock_offset = 0; long temp_clock = clock_count; while (clock_offset < temp_clock) { clock_offset++; } d_o[0] = clock_offset; } /* __global__ void clock_block(clock_t* d_o, clock_t clock_count) { clock_t start_clock = clock64(); clock_t clock_offset = 0; while (clock_offset < clock_count) { clock_offset = clock64() - start_clock; } d_o[0] = clock_offset; } */ int main() { long d_o; clock_t* d_p; long clock_count = 705000; hipMalloc((void**)&d_p, sizeof(clock_t)); size_t threads = 128; size_t blocks = 10 * 13 * 2048 / threads; double gpuTime; StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); hipLaunchKernelGGL(( clock_block), dim3(blocks), dim3(threads), 0, 0, d_p, clock_count); hipDeviceSynchronize(); hipMemcpy(&d_o, d_p, sizeof(long), hipMemcpyDeviceToHost); sdkStopTimer(&hTimer); gpuTime = sdkGetTimerValue(&hTimer); printf("clock: %u\n", d_o); printf("GPU() time : %f msec\n", gpuTime); }
977ca89a3339e05dac1ccfe45b4f73c4105357d2.cu
#include <helper_functions.h> // helper functions for string parsing #include <helper_cuda.h> // helper functions CUDA error checking and initialization const char* path = "/home/tnallen/dev/ics2017/src/test/bad/test.cu"; const char* kernel = "clock_block"; __global__ void clock_block(clock_t* d_o, volatile long clock_count) { volatile long clock_offset = 0; long temp_clock = clock_count; while (clock_offset < temp_clock) { clock_offset++; } d_o[0] = clock_offset; } /* __global__ void clock_block(clock_t* d_o, clock_t clock_count) { clock_t start_clock = clock64(); clock_t clock_offset = 0; while (clock_offset < clock_count) { clock_offset = clock64() - start_clock; } d_o[0] = clock_offset; } */ int main() { long d_o; clock_t* d_p; long clock_count = 705000; cudaMalloc((void**)&d_p, sizeof(clock_t)); size_t threads = 128; size_t blocks = 10 * 13 * 2048 / threads; double gpuTime; StopWatchInterface *hTimer = NULL; sdkCreateTimer(&hTimer); sdkResetTimer(&hTimer); sdkStartTimer(&hTimer); clock_block<<<blocks, threads>>>(d_p, clock_count); cudaDeviceSynchronize(); cudaMemcpy(&d_o, d_p, sizeof(long), cudaMemcpyDeviceToHost); sdkStopTimer(&hTimer); gpuTime = sdkGetTimerValue(&hTimer); printf("clock: %u\n", d_o); printf("GPU() time : %f msec\n", gpuTime); }
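// --- Editor's note: the host code above declares `long d_o` but prints it with "%u";
// on LP64 platforms "%ld" is the matching conversion. A tiny standalone spin-wait sketch
// with the corrected print; the iteration count is arbitrary and the kernel is restated
// (as spin_demo) only to keep the example self-contained. ---
#include <cuda_runtime.h>
#include <cstdio>

__global__ void spin_demo(long *out, long count) {
  volatile long i = 0;          // volatile keeps the busy-wait from being optimized away
  while (i < count) i++;
  out[0] = i;
}

int main() {
  long h = 0, *d = nullptr;
  cudaMalloc(&d, sizeof(long));
  spin_demo<<<1, 1>>>(d, 705000L);
  cudaMemcpy(&h, d, sizeof(long), cudaMemcpyDeviceToHost);   // implicit sync on the default stream
  printf("count: %ld\n", h);    // %ld matches long
  cudaFree(d);
  return 0;
}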
3f6f3a27d0915003f279d28283ce6677585be2b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template <typename DType> __device__ void Clip(DType* value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template <typename DType> __device__ void CalculateOverlap(const DType* a, const DType* b, DType* iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template <typename DType> __global__ __launch_bounds__(cuda::kMaxThreadsPerBlock) void DetectionForwardKernel( DType* out, const DType* cls_prob, const DType* loc_pred, const DType* anchors, DType* temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType* src = out; DType* dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out ? temp_space : out; dst = dst == out ? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType* compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template <typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType>& out, const Tensor<gpu, 3, DType>& cls_prob, const Tensor<gpu, 2, DType>& loc_pred, const Tensor<gpu, 2, DType>& anchors, const Tensor<gpu, 3, DType>& temp_space, const float threshold, const bool clip, const mxnet::Tuple<float>& variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( cuda::DetectionForwardKernel), dim3(num_blocks), dim3(num_threads), 0, stream, out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(hipGetLastError()); } } // namespace mshadow namespace mxnet { namespace op { template <> Operator* CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator* op = nullptr; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
3f6f3a27d0915003f279d28283ce6677585be2b2.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file multibox_detection.cu * \brief MultiBoxDetection op * \author Joshua Zhang */ #include "./multibox_detection-inl.h" #include <mshadow/cuda/tensor_gpu-inl.cuh> #define MULTIBOX_DETECTION_CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \ } while (0) namespace mshadow { namespace cuda { template <typename DType> __device__ void Clip(DType* value, const DType lower, const DType upper) { if ((*value) < lower) *value = lower; if ((*value) > upper) *value = upper; } template <typename DType> __device__ void CalculateOverlap(const DType* a, const DType* b, DType* iou) { DType w = max(DType(0), min(a[2], b[2]) - max(a[0], b[0])); DType h = max(DType(0), min(a[3], b[3]) - max(a[1], b[1])); DType i = w * h; DType u = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - i; (*iou) = u <= 0.f ? static_cast<DType>(0) : static_cast<DType>(i / u); } template <typename DType> __global__ __launch_bounds__(cuda::kMaxThreadsPerBlock) void DetectionForwardKernel( DType* out, const DType* cls_prob, const DType* loc_pred, const DType* anchors, DType* temp_space, const int num_classes, const int num_anchors, const float threshold, const bool clip, const float vx, const float vy, const float vw, const float vh, const float nms_threshold, const bool force_suppress, const int nms_topk) { const int nbatch = blockIdx.x; // each block for each batch int index = threadIdx.x; __shared__ int valid_count; out += nbatch * num_anchors * 6; cls_prob += nbatch * num_anchors * num_classes; loc_pred += nbatch * num_anchors * 4; if (index == 0) { valid_count = 0; } __syncthreads(); // apply prediction to anchors for (int i = index; i < num_anchors; i += blockDim.x) { DType score = -1; int id = 0; for (int j = 1; j < num_classes; ++j) { DType temp = cls_prob[j * num_anchors + i]; if (temp > score) { score = temp; id = j; } } if (id > 0 && score < threshold) { id = 0; } if (id > 0) { // valid class int pos = atomicAdd(&valid_count, 1); out[pos * 6] = id - 1; // restore original class id out[pos * 6 + 1] = (id == 0 ? 
DType(-1) : score); int offset = i * 4; DType al = anchors[offset]; DType at = anchors[offset + 1]; DType ar = anchors[offset + 2]; DType ab = anchors[offset + 3]; DType aw = ar - al; DType ah = ab - at; DType ax = (al + ar) / 2.f; DType ay = (at + ab) / 2.f; DType ox = loc_pred[offset] * vx * aw + ax; DType oy = loc_pred[offset + 1] * vy * ah + ay; DType ow = exp(loc_pred[offset + 2] * vw) * aw / 2; DType oh = exp(loc_pred[offset + 3] * vh) * ah / 2; DType xmin = ox - ow; DType ymin = oy - oh; DType xmax = ox + ow; DType ymax = oy + oh; if (clip) { Clip(&xmin, DType(0), DType(1)); Clip(&ymin, DType(0), DType(1)); Clip(&xmax, DType(0), DType(1)); Clip(&ymax, DType(0), DType(1)); } out[pos * 6 + 2] = xmin; out[pos * 6 + 3] = ymin; out[pos * 6 + 4] = xmax; out[pos * 6 + 5] = ymax; } } __syncthreads(); if (valid_count < 1 || nms_threshold <= 0 || nms_threshold > 1) return; // if (index == 0) printf("%d\n", valid_count); // descent sort according to scores const int size = valid_count; temp_space += nbatch * num_anchors * 6; DType* src = out; DType* dst = temp_space; for (int width = 2; width < (size << 1); width <<= 1) { int slices = (size - 1) / (blockDim.x * width) + 1; int start = width * index * slices; for (int slice = 0; slice < slices; ++slice) { if (start >= size) break; int middle = start + (width >> 1); if (middle > size) middle = size; int end = start + width; if (end > size) end = size; int i = start; int j = middle; for (int k = start; k < end; ++k) { DType score_i = i < size ? src[i * 6 + 1] : DType(-1); DType score_j = j < size ? src[j * 6 + 1] : DType(-1); if (i < middle && (j >= end || score_i > score_j)) { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[i * 6 + n]; } ++i; } else { for (int n = 0; n < 6; ++n) { dst[k * 6 + n] = src[j * 6 + n]; } ++j; } } start += width; } __syncthreads(); src = src == out ? temp_space : out; dst = dst == out ? 
temp_space : out; } __syncthreads(); if (src == temp_space) { // copy from temp to out for (int i = index; i < size * 6; i += blockDim.x) { out[i] = temp_space[i]; } __syncthreads(); } // keep top k detections int ntop = size; if (nms_topk > 0 && nms_topk < ntop) { ntop = nms_topk; for (int i = ntop + index; i < size; i += blockDim.x) { out[i * 6] = -1; } __syncthreads(); } // apply NMS for (int compare_pos = 0; compare_pos < ntop; ++compare_pos) { DType compare_id = out[compare_pos * 6]; if (compare_id < 0) continue; // not a valid positive detection, skip DType* compare_loc_ptr = out + compare_pos * 6 + 2; for (int i = compare_pos + index + 1; i < ntop; i += blockDim.x) { DType class_id = out[i * 6]; if (class_id < 0) continue; if (force_suppress || (class_id == compare_id)) { DType iou; CalculateOverlap(compare_loc_ptr, out + i * 6 + 2, &iou); if (iou >= nms_threshold) { out[i * 6] = -1; } } } __syncthreads(); } } } // namespace cuda template <typename DType> inline void MultiBoxDetectionForward(const Tensor<gpu, 3, DType>& out, const Tensor<gpu, 3, DType>& cls_prob, const Tensor<gpu, 2, DType>& loc_pred, const Tensor<gpu, 2, DType>& anchors, const Tensor<gpu, 3, DType>& temp_space, const float threshold, const bool clip, const mxnet::Tuple<float>& variances, const float nms_threshold, const bool force_suppress, const int nms_topk) { CHECK_EQ(variances.ndim(), 4) << "Variance size must be 4"; const int num_classes = cls_prob.size(1); const int num_anchors = cls_prob.size(2); const int num_batches = cls_prob.size(0); const int num_threads = cuda::kMaxThreadsPerBlock; int num_blocks = num_batches; cuda::CheckLaunchParam(num_blocks, num_threads, "MultiBoxDetection Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); cuda::DetectionForwardKernel<<<num_blocks, num_threads, 0, stream>>>(out.dptr_, cls_prob.dptr_, loc_pred.dptr_, anchors.dptr_, temp_space.dptr_, num_classes, num_anchors, threshold, clip, variances[0], variances[1], variances[2], variances[3], nms_threshold, force_suppress, nms_topk); MULTIBOX_DETECTION_CUDA_CHECK(cudaGetLastError()); } } // namespace mshadow namespace mxnet { namespace op { template <> Operator* CreateOp<gpu>(MultiBoxDetectionParam param, int dtype) { Operator* op = nullptr; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new MultiBoxDetectionOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
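// --- Editor's note: a small host-side reference of the anchor-decoding arithmetic used in
// DetectionForwardKernel above (corner-form anchor -> center/size, apply the variance-scaled
// location offsets, convert back to corners, optionally clip to [0,1]). Handy for
// spot-checking a single decoded box against kernel output; decode_box is an illustrative
// helper, not an MXNet API. ---
#include <cmath>
#include <cstdio>
#include <algorithm>

struct Box { float xmin, ymin, xmax, ymax; };

static Box decode_box(const float a[4], const float p[4],
                      float vx, float vy, float vw, float vh, bool clip) {
  float aw = a[2] - a[0], ah = a[3] - a[1];
  float ax = (a[0] + a[2]) * 0.5f, ay = (a[1] + a[3]) * 0.5f;
  float ox = p[0] * vx * aw + ax;               // predicted center
  float oy = p[1] * vy * ah + ay;
  float ow = std::exp(p[2] * vw) * aw * 0.5f;   // predicted half extents
  float oh = std::exp(p[3] * vh) * ah * 0.5f;
  Box b{ox - ow, oy - oh, ox + ow, oy + oh};
  if (clip) {
    b.xmin = std::min(std::max(b.xmin, 0.f), 1.f);
    b.ymin = std::min(std::max(b.ymin, 0.f), 1.f);
    b.xmax = std::min(std::max(b.xmax, 0.f), 1.f);
    b.ymax = std::min(std::max(b.ymax, 0.f), 1.f);
  }
  return b;
}

int main() {
  const float anchor[4] = {0.1f, 0.1f, 0.3f, 0.3f};
  const float pred[4]   = {0.f, 0.f, 0.f, 0.f};   // zero offsets reproduce the anchor box
  Box b = decode_box(anchor, pred, 0.1f, 0.1f, 0.2f, 0.2f, true);
  printf("decoded: %f %f %f %f\n", b.xmin, b.ymin, b.xmax, b.ymax);
  return 0;
}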
9e9efdaff1a70526fb2ab7c5954b896b290edfb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als_mpi.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel3), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, (half*)A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), 0, //updateXWithCGKernel2, batchSize, 96, 4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); hipDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(hipMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); hipLaunchKernelGGL(( fp16Array2fp32Array), dim3((f*f-1)/1024 + 1), dim3(1024), 0, 0, A_fp32, (half*)A, f*f); hipDeviceSynchronize(); cudaCheckError(); cudacall(hipMemcpy(h_A, A_fp32, f * f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(hipFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(hipMemcpy(h_x, x, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(hipMemcpy(h_b, b, f * sizeof(float), hipMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHostAsync(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter, hipStream_t *stream){ hipLaunchKernelGGL(( updateXWithCGKernel), dim3(batchSize), dim3(f), (4*f+4)*sizeof(float), *stream, A, x, b, batchSize, f, cgIter); } 
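// --- Editor's note (sketch, not part of the original ALS source): a plain host-side
// reference CG solve mirroring the per-block iteration in updateXWithCGKernel above
// (r = b - A*x, p = r, then alpha = rsold / p'Ap, x += alpha*p, r -= alpha*Ap,
// beta = rsnew / rsold, p = r + beta*p). It can be used to spot-check one f x f system
// copied back from the device; cg_solve_host is an illustrative name. ---
static void cg_solve_host(const float *A, float *x, const float *b,
                          int f, int iters, float tol) {
  float *r = new float[f], *p = new float[f], *ap = new float[f];
  for (int k = 0; k < f; ++k) {            // r = b - A*x; p = r
    float t = 0.f;
    for (int i = 0; i < f; ++i) t += A[i * f + k] * x[i];   // A is symmetric
    r[k] = b[k] - t;
    p[k] = r[k];
  }
  float rsold = 0.f;
  for (int k = 0; k < f; ++k) rsold += r[k] * r[k];
  for (int it = 0; it < iters; ++it) {
    float pap = 0.f;
    for (int k = 0; k < f; ++k) {          // ap = A*p
      float t = 0.f;
      for (int i = 0; i < f; ++i) t += A[i * f + k] * p[i];
      ap[k] = t;
    }
    for (int k = 0; k < f; ++k) pap += p[k] * ap[k];
    float alpha = rsold / pap;
    float rsnew = 0.f;
    for (int k = 0; k < f; ++k) {
      x[k] += alpha * p[k];                // x = x + alpha*p
      r[k] -= alpha * ap[k];               // r = r - alpha*Ap
      rsnew += r[k] * r[k];
    }
    if (rsnew < tol) break;                // same stopping test as CG_ERROR in the kernel
    float beta = rsnew / rsold;
    for (int k = 0; k < f; ++k) p[k] = r[k] + beta * p[k];
    rsold = rsnew;
  }
  delete[] r; delete[] p; delete[] ap;
}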
//fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } 
#endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float *sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = (float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + 
temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + 
temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + 
temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); 
atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ hipLaunchKernelGGL(( alsUpdateFeature100), dim3(m), dim3(64), SCAN_BATCH * F/2*sizeof(float2), 0, batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); hipDeviceSynchronize(); cudaCheckError(); }
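// ---------------------------------------------------------------------------
// Added commentary (not part of the original source): alsUpdateFeature100 is
// specialized for F == 100. alsUpdateFeature100Host launches one 64-thread
// block per row of XT with SCAN_BATCH * F/2 * sizeof(float2) bytes of dynamic
// shared memory (24 * 50 * 8 = 9600 bytes for SCAN_BATCH = 24 and F = 100).
// After the per-row symmetric system has been accumulated into the
// temp0..temp99 registers (10x10 tiles, 55 active threads), the same
// thetaTemp buffer is reused as CG scratch space: sharedx, sharedp, sharedr,
// sharedap, sharedax plus the rsold/alpha/rsnew/beta scalars.
// ---------------------------------------------------------------------------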
9e9efdaff1a70526fb2ab7c5954b896b290edfb1.cu
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * cg.cu * Created on: July 22, 2016 * Author: Wei Tan ([email protected]) * CUDA kernels related to batch CG solver used in ALS * CG solver: https://en.wikipedia.org/wiki/Conjugate_gradient_method * Code optimized for F = 100, and on cc 3.5, 3.7 platforms. Also tested in cc 5.2 */ #include "als_mpi.h" #include "device_utilities.h" #include "host_utilities.h" #include <fstream> #define SCAN_BATCH 24 #define CG_ERROR 1e-4 #undef DEBUG //CG (iterative solve) kernel //each block solves a A*x=b __global__ void updateXWithCGKernel(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; //sharedx[threadIdx.x] = 0; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += A[blockIdx.x*f*f + f*i + threadIdx.x]*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //CG (iterative solve) kernel //each block solves a A*x=b and A in fp16 __global__ void updateXWithCGKernel3(half * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x sharedx[threadIdx.x] = x[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); //r=b-A*x; float temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedx[i]; sharedr[threadIdx.x] = b[blockIdx.x*blockDim.x + threadIdx.x] - temp; //p=r; sharedp[threadIdx.x] = sharedr[threadIdx.x]; //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = sharedr[threadIdx.x] *sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
temp = 0; for(int i = 0; i < f; i++) //this is math correct and coalesced because A is symmetric temp += __half2float(A[blockIdx.x*f*f + f*i + threadIdx.x])*sharedp[i]; sharedap[threadIdx.x] = temp; #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = sharedp[threadIdx.x] *sharedap[threadIdx.x]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); //x=x+alpha*p; sharedx[threadIdx.x] = sharedx[threadIdx.x] + alpha[0] * sharedp[threadIdx.x]; //r=r-alpha*Ap; sharedr[threadIdx.x] = sharedr[threadIdx.x] - alpha[0] * sharedap[threadIdx.x]; //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; sharedp[threadIdx.x] = sharedr[threadIdx.x] + beta[0] * sharedp[threadIdx.x]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx x[blockIdx.x*blockDim.x + threadIdx.x] = sharedx[threadIdx.x]; } //blockDim.x=64 or 96 (two or three WARPs) instead of 100 -- WARP shuffle seems requiring this __global__ void updateXWithCGKernel2(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ extern __shared__ float smem[]; float *sharedx = &smem[0]; float *sharedp = &smem[f]; float *sharedr = &smem[2*f]; float *sharedap = &smem[3*f]; float *rsold = &smem[4*f]; float *alpha = &smem[4*f+1]; float *rsnew = &smem[4*f+2]; float *beta = &smem[4*f+3]; //sharedx<--x for(int k = threadIdx.x; k < f; k += blockDim.x) sharedx[k] = x[blockIdx.x*f + k]; __syncthreads(); //r=b-A*x; float temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedx[i]; sharedr[k] = b[blockIdx.x*f + k] - temp; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); //temp = blockReduceSum(shared, temp); __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; //WARN: set temp to zero since the next operation is +=! 
for(int k = threadIdx.x; k < f; k += blockDim.x){ temp = 0; for(int i = 0; i < f; i++) temp += A[blockIdx.x*f*f + f*i + k]*sharedp[i]; sharedap[k] = temp; } #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; } //NOT needed? //__syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; /* temp = sharedr[threadIdx.x]*sharedr[threadIdx.x]; temp = blockReduceSum(shared, temp); __syncthreads(); if(threadIdx.x == 0){ rsnew[0] = temp; } */ temp = 0; for(int k = threadIdx.x; k < f; k += blockDim.x) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); for(int k = threadIdx.x; k < f; k += blockDim.x) //p=r+(rsnew/rsold)*p; sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < f; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < f; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations for(int k = threadIdx.x; k < f; k += blockDim.x) //x<--sharedx x[blockIdx.x*f + k] = sharedx[k]; } void updateXWithCGHost_tt_fp16(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel3<<<batchSize, f, (4*f+4)*sizeof(float)>>> ((half*)A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHost(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter){ updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float)>>> //updateXWithCGKernel2<<<batchSize, 96, (4*f+4)*sizeof(float)>>> (A, x, b, batchSize, f, cgIter); cudaDeviceSynchronize(); cudaCheckError(); #ifdef DEBUG printf("***A[0]:\n"); float *h_A = new float[f * f]; float *A_fp32; cudacall(cudaMalloc((void** ) &A_fp32, f * f * sizeof(A_fp32[0]))); fp16Array2fp32Array<<<(f*f-1)/1024 + 1, 1024>>>(A_fp32, (half*)A, f*f); cudaDeviceSynchronize(); cudaCheckError(); cudacall(cudaMemcpy(h_A, A_fp32, f * f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f*f; i++) printf("%f ", h_A[i]); printf("\n"); delete [] h_A; cudacall(cudaFree(A_fp32)); printf("***x[0]:\n"); float *h_x = new float[f]; cudacall(cudaMemcpy(h_x, x, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_x[i]); printf("\n"); delete [] h_x; /* printf("***b[0]:\n"); float *h_b = new float[f]; cudacall(cudaMemcpy(h_b, b, f * sizeof(float), cudaMemcpyDeviceToHost)); for(int i = 0; i < f; i++) printf("%f ", h_b[i]); printf("\n"); delete [] h_b; */ #endif } void updateXWithCGHostAsync(float * A, float * x, float * b, const int batchSize, const int f, const float cgIter, cudaStream_t *stream){ updateXWithCGKernel<<<batchSize, f, (4*f+4)*sizeof(float), *stream>>>(A, x, b, batchSize, f, cgIter); } //fused kernel, use thetaT to update XT __global__ void __launch_bounds__(64) alsUpdateFeature100(const int batch_offset, const int* csrRowIndex, const 
int* csrColIndex, const float lambda, const int m, const int F, const float* thetaT, float* XT, float* ythetaT, int cgIter) { extern __shared__ float2 thetaTemp[]; int row = blockIdx.x + batch_offset; if (row < m) { //this block needs to handle end - start thetaT columns int start = csrRowIndex[row]; int end = csrRowIndex[row + 1]; //slide through [start, end] by window size SCAN_BATCH int iterations = (end - start - 1)/SCAN_BATCH + 1; float temp0= 0, temp1= 0, temp2= 0, temp3= 0, temp4= 0, temp5= 0, temp6= 0, temp7= 0, temp8= 0, temp9 = 0; float temp10= 0, temp11= 0, temp12= 0, temp13= 0, temp14= 0, temp15= 0, temp16= 0, temp17= 0, temp18= 0, temp19 = 0; float temp20= 0, temp21= 0, temp22= 0, temp23= 0, temp24= 0, temp25= 0, temp26= 0, temp27= 0, temp28= 0, temp29 = 0; float temp30= 0, temp31= 0, temp32= 0, temp33= 0, temp34= 0, temp35= 0, temp36= 0, temp37= 0, temp38= 0, temp39 = 0; float temp40= 0, temp41= 0, temp42= 0, temp43= 0, temp44= 0, temp45= 0, temp46= 0, temp47= 0, temp48= 0, temp49 = 0; float temp50= 0, temp51= 0, temp52= 0, temp53= 0, temp54= 0, temp55= 0, temp56= 0, temp57= 0, temp58= 0, temp59 = 0; float temp60= 0, temp61= 0, temp62= 0, temp63= 0, temp64= 0, temp65= 0, temp66= 0, temp67= 0, temp68= 0, temp69 = 0; float temp70= 0, temp71= 0, temp72= 0, temp73= 0, temp74= 0, temp75= 0, temp76= 0, temp77= 0, temp78= 0, temp79 = 0; float temp80= 0, temp81= 0, temp82= 0, temp83= 0, temp84= 0, temp85= 0, temp86= 0, temp87= 0, temp88= 0, temp89 = 0; float temp90= 0, temp91= 0, temp92= 0, temp93= 0, temp94= 0, temp95= 0, temp96= 0, temp97= 0, temp98= 0, temp99 = 0; int tile_x = 0; int tile_y = 0; int tile = F/10; for ( int i = 0; i < 10; i++){ int end = ((20-i)*(i+1))/2; if(threadIdx.x < end){ tile_x = i * tile; tile_y = (10 + threadIdx.x - end) * tile; break; } } //iteration: copy gmem-->smem; aggregate smem-->register for (int iter = 0; iter < iterations; iter ++){ float2 theta; //copy texture --> smem, and sync //two layers: warp divergence unless we split at 32 //require 32 >= SCAN_BATCH if(threadIdx.x < 2*32 ){ //int index = threadIdx.x; int index = threadIdx.x - (threadIdx.x/32)*32; //0 to 31; if(index < SCAN_BATCH){ if(iter*SCAN_BATCH + index < end - start){ //for (int k = 50*(threadIdx.x/32); k < 50*(threadIdx.x/32) + 50; k += 2){ //IMPORTANT: for loop has constant and identical start and end if(threadIdx.x < 32){ for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k+1]); thetaTemp[index * F/2 + k/2] = theta; } } else { for (int k = 0; k < 50; k += 2){ theta.x = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 50]); theta.y = __ldg(&thetaT[ F * csrColIndex[start + iter*SCAN_BATCH + index] + k + 51]); thetaTemp[index * F/2 + k/2 + 25] = theta; } } } //must be the last iteration; no need to check //not enough theta to copy, set zero else memset(&thetaTemp[index*F/2], 0, F*sizeof(float)); } } __syncthreads(); //tile: 10*10 if(threadIdx.x < 55 ){ for(int k = 0; k < SCAN_BATCH; k++){ accumulate_in_registers(); } } } //end of iteration in copying from smem and aggregating in register __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //newly added CG phase //reuse the abundant shared memory float *sharedx = (float*)&thetaTemp[0]; float *sharedp = (float*)&thetaTemp[50]; float 
*sharedr = (float*)&thetaTemp[100]; float *sharedap = (float*)&thetaTemp[150]; float *sharedax = (float*)&thetaTemp[200]; float *rsold = (float*)&thetaTemp[250]; float *alpha = (float*)&thetaTemp[251]; float *rsnew = (float*)&thetaTemp[252]; float *beta = (float*)&thetaTemp[253]; //sharedx<--x for(int k = threadIdx.x; k < F; k += 64){ sharedx[k] = XT[blockIdx.x*F + k]; sharedax[k] = 0; } __syncthreads(); float temp = 0; //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ //add regularization if(tile_x==tile_y){ temp = (end - start) * lambda; temp0 += temp; temp11 += temp; temp22 += temp; temp33 += temp; temp44 += temp; temp55 += temp; temp66 += temp; temp77 += temp; temp88 += temp; temp99 += temp; } #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***temp 0~9: %f %f %f %f %f %f %f %f %f %f\n", temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9); } #endif //r=b-A*x; //step1: ax=A*x atomicAdd(&sharedax[tile_y], temp0*sharedx[tile_x] + temp10*sharedx[tile_x+1] + temp20*sharedx[tile_x+2] + temp30*sharedx[tile_x+3] + temp40*sharedx[tile_x + 4] + temp50*sharedx[tile_x + 5] + temp60*sharedx[tile_x + 6] + temp70*sharedx[tile_x + 7] + temp80*sharedx[tile_x + 8] + temp90*sharedx[tile_x + 9]); atomicAdd(&sharedax[tile_y+1], temp1*sharedx[tile_x] + temp11*sharedx[tile_x+1] + temp21*sharedx[tile_x+2] + temp31*sharedx[tile_x+3] + temp41*sharedx[tile_x+4] + temp51*sharedx[tile_x+5] + temp61*sharedx[tile_x+6] + temp71*sharedx[tile_x+7] + temp81*sharedx[tile_x+8] + temp91*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+2], temp2*sharedx[tile_x] + temp12*sharedx[tile_x+1] + temp22*sharedx[tile_x+2] + temp32*sharedx[tile_x+3] + temp42*sharedx[tile_x+4] + temp52*sharedx[tile_x+5] + temp62*sharedx[tile_x+6] + temp72*sharedx[tile_x+7] + temp82*sharedx[tile_x+8] + temp92*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+3], temp3*sharedx[tile_x] + temp13*sharedx[tile_x+1] + temp23*sharedx[tile_x+2] + temp33*sharedx[tile_x+3] + temp43*sharedx[tile_x+4] + temp53*sharedx[tile_x+5] + temp63*sharedx[tile_x+6] + temp73*sharedx[tile_x+7] + temp83*sharedx[tile_x+8] + temp93*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+4], temp4*sharedx[tile_x] + temp14*sharedx[tile_x+1] + temp24*sharedx[tile_x+2] + temp34*sharedx[tile_x+3] + temp44*sharedx[tile_x+4] + temp54*sharedx[tile_x+5] + temp64*sharedx[tile_x+6] + temp74*sharedx[tile_x+7] + temp84*sharedx[tile_x+8] + temp94*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+5], temp5*sharedx[tile_x] + temp15*sharedx[tile_x+1] + temp25*sharedx[tile_x+2] + temp35*sharedx[tile_x+3] + temp45*sharedx[tile_x+4] + temp55*sharedx[tile_x+5] + temp65*sharedx[tile_x+6] + temp75*sharedx[tile_x+7] + temp85*sharedx[tile_x+8] + temp95*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+6], temp6*sharedx[tile_x] + temp16*sharedx[tile_x+1] + temp26*sharedx[tile_x+2] + temp36*sharedx[tile_x+3] + temp46*sharedx[tile_x+4] + temp56*sharedx[tile_x+5] + temp66*sharedx[tile_x+6] + temp76*sharedx[tile_x+7] + temp86*sharedx[tile_x+8] + temp96*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+7], temp7*sharedx[tile_x] + temp17*sharedx[tile_x+1] + temp27*sharedx[tile_x+2] + temp37*sharedx[tile_x+3] + temp47*sharedx[tile_x+4] + temp57*sharedx[tile_x+5] + temp67*sharedx[tile_x+6] + temp77*sharedx[tile_x+7] + temp87*sharedx[tile_x+8] + temp97*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+8], temp8*sharedx[tile_x] + temp18*sharedx[tile_x+1] + temp28*sharedx[tile_x+2] + temp38*sharedx[tile_x+3] + temp48*sharedx[tile_x+4] + temp58*sharedx[tile_x+5] + temp68*sharedx[tile_x+6] + 
temp78*sharedx[tile_x+7] + temp88*sharedx[tile_x+8] + temp98*sharedx[tile_x+9]); atomicAdd(&sharedax[tile_y+9], temp9*sharedx[tile_x] + temp19*sharedx[tile_x+1] + temp29*sharedx[tile_x+2] + temp39*sharedx[tile_x+3] + temp49*sharedx[tile_x+4] + temp59*sharedx[tile_x+5] + temp69*sharedx[tile_x+6] + temp79*sharedx[tile_x+7] + temp89*sharedx[tile_x+8] + temp99*sharedx[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedax[tile_x], temp0*sharedx[tile_y] + temp1*sharedx[tile_y + 1] + temp2*sharedx[tile_y + 2] + temp3*sharedx[tile_y + 3] + temp4*sharedx[tile_y + 4] + temp5*sharedx[tile_y + 5] + temp6*sharedx[tile_y + 6] + temp7*sharedx[tile_y + 7] + temp8*sharedx[tile_y + 8] + temp9*sharedx[tile_y + 9]); atomicAdd(&sharedax[tile_x+1], temp10*sharedx[tile_y] + temp11*sharedx[tile_y+1] + temp12*sharedx[tile_y+2] + temp13*sharedx[tile_y+3] + temp14*sharedx[tile_y+4] + temp15*sharedx[tile_y+5] + temp16*sharedx[tile_y+6] + temp17*sharedx[tile_y+7] + temp18*sharedx[tile_y+8] + temp19*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+2], temp20*sharedx[tile_y] + temp21*sharedx[tile_y+1] + temp22*sharedx[tile_y+2] + temp23*sharedx[tile_y+3] + temp24*sharedx[tile_y+4] + temp25*sharedx[tile_y+5] + temp26*sharedx[tile_y+6] + temp27*sharedx[tile_y+7] + temp28*sharedx[tile_y+8] + temp29*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+3], temp30*sharedx[tile_y] + temp31*sharedx[tile_y+1] + temp32*sharedx[tile_y+2] + temp33*sharedx[tile_y+3] + temp34*sharedx[tile_y+4] + temp35*sharedx[tile_y+5] + temp36*sharedx[tile_y+6] + temp37*sharedx[tile_y+7] + temp38*sharedx[tile_y+8] + temp39*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+4], temp40*sharedx[tile_y] + temp41*sharedx[tile_y+1] + temp42*sharedx[tile_y+2] + temp43*sharedx[tile_y+3] + temp44*sharedx[tile_y+4] + temp45*sharedx[tile_y+5] + temp46*sharedx[tile_y+6] + temp47*sharedx[tile_y+7] + temp48*sharedx[tile_y+8] + temp49*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+5], temp50*sharedx[tile_y] + temp51*sharedx[tile_y+1] + temp52*sharedx[tile_y+2] + temp53*sharedx[tile_y+3] + temp54*sharedx[tile_y+4] + temp55*sharedx[tile_y+5] + temp56*sharedx[tile_y+6] + temp57*sharedx[tile_y+7] + temp58*sharedx[tile_y+8] + temp59*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+6], temp60*sharedx[tile_y] + temp61*sharedx[tile_y+1] + temp62*sharedx[tile_y+2] + temp63*sharedx[tile_y+3] + temp64*sharedx[tile_y+4] + temp65*sharedx[tile_y+5] + temp66*sharedx[tile_y+6] + temp67*sharedx[tile_y+7] + temp68*sharedx[tile_y+8] + temp69*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+7], temp70*sharedx[tile_y] + temp71*sharedx[tile_y+1] + temp72*sharedx[tile_y+2] + temp73*sharedx[tile_y+3] + temp74*sharedx[tile_y+4] + temp75*sharedx[tile_y+5] + temp76*sharedx[tile_y+6] + temp77*sharedx[tile_y+7] + temp78*sharedx[tile_y+8] + temp79*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+8], temp80*sharedx[tile_y] + temp81*sharedx[tile_y+1] + temp82*sharedx[tile_y+2] + temp83*sharedx[tile_y+3] + temp84*sharedx[tile_y+4] + temp85*sharedx[tile_y+5] + temp86*sharedx[tile_y+6] + temp87*sharedx[tile_y+7] + temp88*sharedx[tile_y+8] + temp89*sharedx[tile_y+9]); atomicAdd(&sharedax[tile_x+9], temp90*sharedx[tile_y] + temp91*sharedx[tile_y+1] + temp92*sharedx[tile_y+2] + temp93*sharedx[tile_y+3] + temp94*sharedx[tile_y+4] + temp95*sharedx[tile_y+5] + temp96*sharedx[tile_y+6] + temp97*sharedx[tile_y+7] + temp98*sharedx[tile_y+8] + temp99*sharedx[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***x:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); 
printf("\n\n"); printf("***r=Ax:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); } #endif for(int k = threadIdx.x; k < F; k += 64){ //r=b-Ax sharedr[k] = ythetaT[blockIdx.x*blockDim.x + k] - sharedax[k]; //p=r; sharedp[k] = sharedr[k]; } //rsold=r'*r; if(threadIdx.x == 0){ rsold[0] = 0; } for(int k = threadIdx.x; k < F; k += 64){ temp += sharedr[k]*sharedr[k]; } blockReduceSumWithAtomics(rsold, temp); __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***shared memory content after 1st blockReduceSum:\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedx[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedax[i]); printf("\n\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < 100; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif ///* //CG iterations for(int iter = 0; iter < cgIter; iter++){ //ap=A*p; for(int k = threadIdx.x; k < F; k += 64) sharedap[k] = 0; __syncthreads(); //only uses 55 threads for A*p and A*x if(threadIdx.x < 55){ atomicAdd(&sharedap[tile_y], temp0*sharedp[tile_x] + temp10*sharedp[tile_x+1] + temp20*sharedp[tile_x+2] + temp30*sharedp[tile_x+3] + temp40*sharedp[tile_x + 4] + temp50*sharedp[tile_x + 5] + temp60*sharedp[tile_x + 6] + temp70*sharedp[tile_x + 7] + temp80*sharedp[tile_x + 8] + temp90*sharedp[tile_x + 9]); atomicAdd(&sharedap[tile_y+1], temp1*sharedp[tile_x] + temp11*sharedp[tile_x+1] + temp21*sharedp[tile_x+2] + temp31*sharedp[tile_x+3] + temp41*sharedp[tile_x+4] + temp51*sharedp[tile_x+5] + temp61*sharedp[tile_x+6] + temp71*sharedp[tile_x+7] + temp81*sharedp[tile_x+8] + temp91*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+2], temp2*sharedp[tile_x] + temp12*sharedp[tile_x+1] + temp22*sharedp[tile_x+2] + temp32*sharedp[tile_x+3] + temp42*sharedp[tile_x+4] + temp52*sharedp[tile_x+5] + temp62*sharedp[tile_x+6] + temp72*sharedp[tile_x+7] + temp82*sharedp[tile_x+8] + temp92*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+3], temp3*sharedp[tile_x] + temp13*sharedp[tile_x+1] + temp23*sharedp[tile_x+2] + temp33*sharedp[tile_x+3] + temp43*sharedp[tile_x+4] + temp53*sharedp[tile_x+5] + temp63*sharedp[tile_x+6] + temp73*sharedp[tile_x+7] + temp83*sharedp[tile_x+8] + temp93*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+4], temp4*sharedp[tile_x] + temp14*sharedp[tile_x+1] + temp24*sharedp[tile_x+2] + temp34*sharedp[tile_x+3] + temp44*sharedp[tile_x+4] + temp54*sharedp[tile_x+5] + temp64*sharedp[tile_x+6] + temp74*sharedp[tile_x+7] + temp84*sharedp[tile_x+8] + temp94*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+5], temp5*sharedp[tile_x] + temp15*sharedp[tile_x+1] + temp25*sharedp[tile_x+2] + temp35*sharedp[tile_x+3] + temp45*sharedp[tile_x+4] + temp55*sharedp[tile_x+5] + temp65*sharedp[tile_x+6] + temp75*sharedp[tile_x+7] + temp85*sharedp[tile_x+8] + temp95*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+6], temp6*sharedp[tile_x] + temp16*sharedp[tile_x+1] + temp26*sharedp[tile_x+2] + temp36*sharedp[tile_x+3] + temp46*sharedp[tile_x+4] + temp56*sharedp[tile_x+5] + temp66*sharedp[tile_x+6] + temp76*sharedp[tile_x+7] + temp86*sharedp[tile_x+8] + temp96*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+7], temp7*sharedp[tile_x] + temp17*sharedp[tile_x+1] + temp27*sharedp[tile_x+2] + temp37*sharedp[tile_x+3] + temp47*sharedp[tile_x+4] + temp57*sharedp[tile_x+5] + temp67*sharedp[tile_x+6] + temp77*sharedp[tile_x+7] + 
temp87*sharedp[tile_x+8] + temp97*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+8], temp8*sharedp[tile_x] + temp18*sharedp[tile_x+1] + temp28*sharedp[tile_x+2] + temp38*sharedp[tile_x+3] + temp48*sharedp[tile_x+4] + temp58*sharedp[tile_x+5] + temp68*sharedp[tile_x+6] + temp78*sharedp[tile_x+7] + temp88*sharedp[tile_x+8] + temp98*sharedp[tile_x+9]); atomicAdd(&sharedap[tile_y+9], temp9*sharedp[tile_x] + temp19*sharedp[tile_x+1] + temp29*sharedp[tile_x+2] + temp39*sharedp[tile_x+3] + temp49*sharedp[tile_x+4] + temp59*sharedp[tile_x+5] + temp69*sharedp[tile_x+6] + temp79*sharedp[tile_x+7] + temp89*sharedp[tile_x+8] + temp99*sharedp[tile_x+9]); if(tile_x!=tile_y){ atomicAdd(&sharedap[tile_x], temp0*sharedp[tile_y] + temp1*sharedp[tile_y + 1] + temp2*sharedp[tile_y + 2] + temp3*sharedp[tile_y + 3] + temp4*sharedp[tile_y + 4] + temp5*sharedp[tile_y + 5] + temp6*sharedp[tile_y + 6] + temp7*sharedp[tile_y + 7] + temp8*sharedp[tile_y + 8] + temp9*sharedp[tile_y + 9]); atomicAdd(&sharedap[tile_x+1], temp10*sharedp[tile_y] + temp11*sharedp[tile_y+1] + temp12*sharedp[tile_y+2] + temp13*sharedp[tile_y+3] + temp14*sharedp[tile_y+4] + temp15*sharedp[tile_y+5] + temp16*sharedp[tile_y+6] + temp17*sharedp[tile_y+7] + temp18*sharedp[tile_y+8] + temp19*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+2], temp20*sharedp[tile_y] + temp21*sharedp[tile_y+1] + temp22*sharedp[tile_y+2] + temp23*sharedp[tile_y+3] + temp24*sharedp[tile_y+4] + temp25*sharedp[tile_y+5] + temp26*sharedp[tile_y+6] + temp27*sharedp[tile_y+7] + temp28*sharedp[tile_y+8] + temp29*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+3], temp30*sharedp[tile_y] + temp31*sharedp[tile_y+1] + temp32*sharedp[tile_y+2] + temp33*sharedp[tile_y+3] + temp34*sharedp[tile_y+4] + temp35*sharedp[tile_y+5] + temp36*sharedp[tile_y+6] + temp37*sharedp[tile_y+7] + temp38*sharedp[tile_y+8] + temp39*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+4], temp40*sharedp[tile_y] + temp41*sharedp[tile_y+1] + temp42*sharedp[tile_y+2] + temp43*sharedp[tile_y+3] + temp44*sharedp[tile_y+4] + temp45*sharedp[tile_y+5] + temp46*sharedp[tile_y+6] + temp47*sharedp[tile_y+7] + temp48*sharedp[tile_y+8] + temp49*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+5], temp50*sharedp[tile_y] + temp51*sharedp[tile_y+1] + temp52*sharedp[tile_y+2] + temp53*sharedp[tile_y+3] + temp54*sharedp[tile_y+4] + temp55*sharedp[tile_y+5] + temp56*sharedp[tile_y+6] + temp57*sharedp[tile_y+7] + temp58*sharedp[tile_y+8] + temp59*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+6], temp60*sharedp[tile_y] + temp61*sharedp[tile_y+1] + temp62*sharedp[tile_y+2] + temp63*sharedp[tile_y+3] + temp64*sharedp[tile_y+4] + temp65*sharedp[tile_y+5] + temp66*sharedp[tile_y+6] + temp67*sharedp[tile_y+7] + temp68*sharedp[tile_y+8] + temp69*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+7], temp70*sharedp[tile_y] + temp71*sharedp[tile_y+1] + temp72*sharedp[tile_y+2] + temp73*sharedp[tile_y+3] + temp74*sharedp[tile_y+4] + temp75*sharedp[tile_y+5] + temp76*sharedp[tile_y+6] + temp77*sharedp[tile_y+7] + temp78*sharedp[tile_y+8] + temp79*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+8], temp80*sharedp[tile_y] + temp81*sharedp[tile_y+1] + temp82*sharedp[tile_y+2] + temp83*sharedp[tile_y+3] + temp84*sharedp[tile_y+4] + temp85*sharedp[tile_y+5] + temp86*sharedp[tile_y+6] + temp87*sharedp[tile_y+7] + temp88*sharedp[tile_y+8] + temp89*sharedp[tile_y+9]); atomicAdd(&sharedap[tile_x+9], temp90*sharedp[tile_y] + temp91*sharedp[tile_y+1] + temp92*sharedp[tile_y+2] + temp93*sharedp[tile_y+3] + temp94*sharedp[tile_y+4] + 
temp95*sharedp[tile_y+5] + temp96*sharedp[tile_y+6] + temp97*sharedp[tile_y+7] + temp98*sharedp[tile_y+8] + temp99*sharedp[tile_y+9]); } } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("----------CG iteration %d \n", iter); printf("***ap:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); printf("***shared memory content before 2rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n\n"); } #endif if(threadIdx.x == 0){ rsnew[0] = 0; } //no need to have sync before blockReduce //because there is a __syncthreads() in blockReduce //pAp=p'*Ap temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedp[k]*sharedap[k]; //temp = blockReduceSum(shared, temp); blockReduceSumWithAtomics(rsnew, temp); //sync needed, to let all atomicAdd threads completes __syncthreads(); if(threadIdx.x == 0){ //pAp = temp; //alpha=rsold/(p'*Ap); use rsnew to store pAp alpha[0] = rsold[0]/rsnew[0]; #ifdef DEBUG if(blockIdx.x==0){ printf("***rsold:\n"); printf("rsold = %f \n", rsold[0]); printf("***pAp:\n"); printf("pAp = %f \n", rsnew[0]); printf("***alpha:\n"); printf("alpha = %f \n", alpha[0]); } #endif rsnew[0] = 0; } //needed, aplpha[0] to be used by all threads __syncthreads(); for(int k = threadIdx.x; k < F; k += 64){ //x=x+alpha*p; sharedx[k] = sharedx[k] + alpha[0] * sharedp[k]; //r=r-alpha*Ap; sharedr[k] = sharedr[k] - alpha[0] * sharedap[k]; //NOT needed? //__syncthreads(); } __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content before 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif //rsnew=r'*r; temp = 0; for(int k = threadIdx.x; k < F; k += 64) temp += sharedr[k]*sharedr[k]; blockReduceSumWithAtomics(rsnew, temp); //WARN: has to have this sync! __syncthreads(); #ifdef DEBUG if(blockIdx.x==0 && threadIdx.x==0){ printf("***rsnew:\n"); printf("rsnew = %f \n", rsnew[0]); printf("***shared memory content after 3rd blockReduceSum:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } #endif if(rsnew[0]<CG_ERROR) break; //NOT needed? 
//__syncthreads(); //beta if(threadIdx.x == 0){ beta[0] = rsnew[0]/rsold[0]; //rsold=rsnew; rsold[0] = rsnew[0]; } //need sync since every thread needs beta[0] __syncthreads(); //p=r+(rsnew/rsold)*p; for(int k = threadIdx.x; k < F; k += 64) sharedp[k] = sharedr[k] + beta[0] * sharedp[k]; //need sync as every thread needs sharedp at the beginning of for __syncthreads(); #ifdef DEBUG __syncthreads(); if(blockIdx.x==0 && threadIdx.x==0){ printf("***shared memory content after update p:\n"); for(int i = 0; i < F; i++) printf("%f ", sharedp[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedr[i]); printf("\n"); for(int i = 0; i < F; i++) printf("%f ", sharedap[i]); printf("\n"); } __syncthreads(); #endif }//end of CG iterations //x<--sharedx for(int k = threadIdx.x; k < F; k += 64) XT[blockIdx.x*F + k] = sharedx[k]; //*/ } } void alsUpdateFeature100Host(const int batch_offset, const int* csrRowIndex, const int* csrColIndex, const float lambda, const int m, const int F, const float* __restrict__ thetaT, float* XT, float* ythetaT, int cgIter){ alsUpdateFeature100<<<m, 64, SCAN_BATCH * F/2*sizeof(float2)>>> (batch_offset, csrRowIndex, csrColIndex, lambda, m, F, thetaT, XT, ythetaT, cgIter); cudaDeviceSynchronize(); cudaCheckError(); }
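// The per-block solver above implements the textbook conjugate-gradient (CG) recurrence entirely
// in shared memory (r = b - A*x, p = r, then alpha = rsold/(p'Ap), x += alpha*p, r -= alpha*Ap,
// beta = rsnew/rsold, p = r + beta*p). For readability, a minimal single-threaded host sketch of
// the same recurrence follows. It is an illustration only: the dense SPD matrix A, the helper
// name cgReferenceSolve, and the dimension n are hypothetical and not part of the original file.

#include <vector>

// Solve A*x = b for a dense symmetric positive-definite A (row-major, n x n),
// mirroring the per-block CG loop of alsUpdateFeature100.
static void cgReferenceSolve(const std::vector<float>& A, const std::vector<float>& b,
                             std::vector<float>& x, int n, int iters, float tol)
{
	std::vector<float> r(n), p(n), Ap(n);
	// r = b - A*x; p = r; rsold = r'*r
	for (int i = 0; i < n; ++i) {
		float Ax = 0.f;
		for (int j = 0; j < n; ++j) Ax += A[i*n + j] * x[j];
		r[i] = b[i] - Ax;
		p[i] = r[i];
	}
	float rsold = 0.f;
	for (int i = 0; i < n; ++i) rsold += r[i] * r[i];

	for (int it = 0; it < iters; ++it) {
		// Ap = A*p and pAp = p'*Ap
		float pAp = 0.f;
		for (int i = 0; i < n; ++i) {
			Ap[i] = 0.f;
			for (int j = 0; j < n; ++j) Ap[i] += A[i*n + j] * p[j];
			pAp += p[i] * Ap[i];
		}
		float alpha = rsold / pAp;
		float rsnew = 0.f;
		for (int i = 0; i < n; ++i) {
			x[i] += alpha * p[i];   // x = x + alpha*p
			r[i] -= alpha * Ap[i];  // r = r - alpha*Ap
			rsnew += r[i] * r[i];
		}
		if (rsnew < tol) break;     // same role as the CG_ERROR test in the kernel
		float beta = rsnew / rsold;
		for (int i = 0; i < n; ++i) p[i] = r[i] + beta * p[i]; // p = r + beta*p
		rsold = rsnew;
	}
}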
249500eada2156f1c201f5bdad5999e70a6d6d53.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Discrete Cosine Transform in row wise (DCT four)
 * DCT_IV_Row
 * This CUDA code can handle/work with any type of the input mxArrays,
 * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
 * gpuArray output, B=DCT_IV_Row(A)=mexFunction(A).
 * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
 * Wellcome Trust Centre for Neuroimaging
 * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
 * Copyright 2018
 * Kevin Bronik
 */

#include "matrix.h"
#include "DCT_IV_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "ERRORCHK.h"

// #define TILE_DIM 16
#define DEFAULT_DIM 32   // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi

template <unsigned int TILE_DIM >
__global__ void DCTIV_Row_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns)
{
	float CValue = 0.0f;
	const float PI_d = 3.141592653589793238462643383279502884f; //pi
	int Row = blockIdx.y*TILE_DIM + threadIdx.y;
	int Col = blockIdx.x*TILE_DIM + threadIdx.x;
	__shared__ float As[TILE_DIM][TILE_DIM];
	__shared__ float Bs[TILE_DIM][TILE_DIM];

	for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
		if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) {
			As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x];
		}
		else {
			As[threadIdx.y][threadIdx.x] = 0.0;
		}
		if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) {
			Bs[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.y + k*TILE_DIM) + 1)*PI_d*(2 * Col + 1) / (4.0 * numAColumns)))*sqrtf(2.0 / numAColumns);
		}
		else {
			Bs[threadIdx.y][threadIdx.x] = 0.0;
		}
		__syncthreads();
		for (int n = 0; n < TILE_DIM; ++n) {
			CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
		}
		__syncthreads();
	}
	if (Row < numCRows && Col < numCColumns) {
		C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
	}
}

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTRowFourS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns)
{
	float * hostA = A; // The A matrix
	//float * hostB = B; // The B matrix
	float * hostC = C; // The output C matrix
	//float * hostComputedC;
	float * deviceA;
	//float * deviceB;
	float * deviceC;
	//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);

	hipError_t error;
	int devID = 0;
	// get number of SMs on this GPU
	error = hipGetDevice(&devID);
	hipDeviceProp_t deviceProp;
	error = hipGetDeviceProperties(&deviceProp, devID);
	if (error != hipSuccess) {
		printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
		exit(EXIT_FAILURE);
	}
	int TILEDIM = (deviceProp.major < 2) ? 16 : 32;

	// Setting numCRows and numCColumns
	numCRows = numARows;
	numCColumns = numAColumns;
	//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
	//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);

	// Allocating GPU memory
	gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
	//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
	gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
	//thrust::device_ptr< float >dev_ptr_A(deviceA);
	//thrust::device_ptr< float >dev_ptr_C(deviceC);

	// Copy memory to the GPU
	gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
	//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);

	/////////////////////////////////////////////////////////
	unsigned int TILE_DIM=16;
	dim3 dimBlock;
	dim3 dimGrid;
	switch (TILEDIM){

	case 16:
		TILE_DIM= TILEDIM;
		dimBlock.x=TILE_DIM;
		dimBlock.y=TILE_DIM;
		dimBlock.z=1;
		dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
		dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;

		DCTIV_Row_Kernelx <16> <<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
		//matrixMultiplyShared <<<dimGrid, dimBlock>>>(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);

		gpuErrchk(hipPeekAtLastError());
		gpuErrchk(hipDeviceSynchronize());

		// Copy the results in GPU memory back to the CPU
		gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
		C = hostC;
		//thrust::device_free(dev_ptr_A);
		//thrust::device_free(dev_ptr_C);
		gpuErrchk(hipFree(deviceA));
		//hipFree(deviceB);
		gpuErrchk(hipFree(deviceC));
		return;

	case 32:
		TILE_DIM= TILEDIM;
		dimBlock.x=TILE_DIM;
		dimBlock.y=TILE_DIM;
		dimBlock.z=1;
		dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
		dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;

		DCTIV_Row_Kernelx <32> <<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
		//matrixMultiplyShared <<<dimGrid, dimBlock>>>(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);

		gpuErrchk(hipPeekAtLastError());
		gpuErrchk(hipDeviceSynchronize());

		// Copy the results in GPU memory back to the CPU
		gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
		C = hostC;
		//thrust::device_free(dev_ptr_A);
		//thrust::device_free(dev_ptr_C);
		gpuErrchk(hipFree(deviceA));
		//hipFree(deviceB);
		gpuErrchk(hipFree(deviceC));
		return;
	}
}
249500eada2156f1c201f5bdad5999e70a6d6d53.cu
/*
 * Discrete Cosine Transform in row wise (DCT four)
 * DCT_IV_Row
 * This CUDA code can handle/work with any type of the input mxArrays,
 * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
 * gpuArray output, B=DCT_IV_Row(A)=mexFunction(A).
 * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
 * Wellcome Trust Centre for Neuroimaging
 * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
 * Copyright 2018
 * Kevin Bronik
 */

#include "matrix.h"
#include "DCT_IV_Row.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include "ERRORCHK.h"

// #define TILE_DIM 16
#define DEFAULT_DIM 32   // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi

template <unsigned int TILE_DIM >
__global__ void DCTIV_Row_Kernelx(float *A, float *C, int numARows, int numAColumns, int numCRows, int numCColumns)
{
	float CValue = 0.0f;
	const float PI_d = 3.141592653589793238462643383279502884f; //pi
	int Row = blockIdx.y*TILE_DIM + threadIdx.y;
	int Col = blockIdx.x*TILE_DIM + threadIdx.x;
	__shared__ float As[TILE_DIM][TILE_DIM];
	__shared__ float Bs[TILE_DIM][TILE_DIM];

	for (int k = 0; k < (TILE_DIM + numAColumns - 1) / TILE_DIM; k++) {
		if (k*TILE_DIM + threadIdx.x < numAColumns && Row < numARows) {
			As[threadIdx.y][threadIdx.x] = A[Row*numAColumns + k*TILE_DIM + threadIdx.x];
		}
		else {
			As[threadIdx.y][threadIdx.x] = 0.0;
		}
		if (k*TILE_DIM + threadIdx.y < numAColumns && Col < numAColumns) {
			Bs[threadIdx.y][threadIdx.x] = __cosf(((2 * (threadIdx.y + k*TILE_DIM) + 1)*PI_d*(2 * Col + 1) / (4.0 * numAColumns)))*sqrtf(2.0 / numAColumns);
		}
		else {
			Bs[threadIdx.y][threadIdx.x] = 0.0;
		}
		__syncthreads();
		for (int n = 0; n < TILE_DIM; ++n) {
			CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
		}
		__syncthreads();
	}
	if (Row < numCRows && Col < numCColumns) {
		C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue;
	}
}

// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTRowFourS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns)
{
	float * hostA = A; // The A matrix
	//float * hostB = B; // The B matrix
	float * hostC = C; // The output C matrix
	//float * hostComputedC;
	float * deviceA;
	//float * deviceB;
	float * deviceC;
	//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);

	cudaError_t error;
	int devID = 0;
	// get number of SMs on this GPU
	error = cudaGetDevice(&devID);
	cudaDeviceProp deviceProp;
	error = cudaGetDeviceProperties(&deviceProp, devID);
	if (error != cudaSuccess) {
		printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
		exit(EXIT_FAILURE);
	}
	int TILEDIM = (deviceProp.major < 2) ? 16 : 32;

	// Setting numCRows and numCColumns
	numCRows = numARows;
	numCColumns = numAColumns;
	//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
	//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);

	// Allocating GPU memory
	gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
	//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
	gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
	//thrust::device_ptr< float >dev_ptr_A(deviceA);
	//thrust::device_ptr< float >dev_ptr_C(deviceC);

	// Copy memory to the GPU
	gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
	//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);

	/////////////////////////////////////////////////////////
	unsigned int TILE_DIM=16;
	dim3 dimBlock;
	dim3 dimGrid;
	switch (TILEDIM){

	case 16:
		TILE_DIM= TILEDIM;
		dimBlock.x=TILE_DIM;
		dimBlock.y=TILE_DIM;
		dimBlock.z=1;
		dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
		dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;

		DCTIV_Row_Kernelx <16> <<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
		//matrixMultiplyShared <<<dimGrid, dimBlock>>>(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);

		gpuErrchk(cudaPeekAtLastError());
		gpuErrchk(cudaDeviceSynchronize());

		// Copy the results in GPU memory back to the CPU
		gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
		C = hostC;
		//thrust::device_free(dev_ptr_A);
		//thrust::device_free(dev_ptr_C);
		gpuErrchk(cudaFree(deviceA));
		//cudaFree(deviceB);
		gpuErrchk(cudaFree(deviceC));
		return;

	case 32:
		TILE_DIM= TILEDIM;
		dimBlock.x=TILE_DIM;
		dimBlock.y=TILE_DIM;
		dimBlock.z=1;
		dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
		dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;

		DCTIV_Row_Kernelx <32> <<<dimGrid, dimBlock>>>(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
		//matrixMultiplyShared <<<dimGrid, dimBlock>>>(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);

		gpuErrchk(cudaPeekAtLastError());
		gpuErrchk(cudaDeviceSynchronize());

		// Copy the results in GPU memory back to the CPU
		gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
		C = hostC;
		//thrust::device_free(dev_ptr_A);
		//thrust::device_free(dev_ptr_C);
		gpuErrchk(cudaFree(deviceA));
		//cudaFree(deviceB);
		gpuErrchk(cudaFree(deviceC));
		return;
	}
}
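// Minimal host-side usage sketch for CalculateTransformDCTRowFourS (illustration only). The
// mexFunction gateway that normally drives this routine from MATLAB is not part of the text
// above, and the sizes below are arbitrary assumptions. The routine allocates, fills, and frees
// all device memory internally, so the caller only supplies row-major host buffers.

#include <cstdio>
#include <vector>

extern "C" void CalculateTransformDCTRowFourS(float * A, float * C, int numARows, int numAColumns, int numCRows, int numCColumns);

int main(void)
{
	const int rows = 8, cols = 8;              // arbitrary example size
	std::vector<float> A(rows * cols, 1.0f);   // constant input matrix
	std::vector<float> C(rows * cols, 0.0f);   // receives the row-wise DCT-IV of A

	// numCRows/numCColumns are reset inside the routine to match A's dimensions.
	CalculateTransformDCTRowFourS(A.data(), C.data(), rows, cols, rows, cols);

	for (int j = 0; j < cols; ++j)
		printf("%f ", C[j]);                   // print the first output row
	printf("\n");
	return 0;
}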